{ "context": { "python_version": "3.12.9 | packaged by Anaconda, Inc. | (main, Feb 6 2025, 18:56:27) [GCC 11.2.0]", "torch_version": "2.5.1+cu124", "engine_args": { "model": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "served_model_name": null, "tokenizer": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "task": "auto", "skip_tokenizer_init": false, "tokenizer_mode": "auto", "trust_remote_code": false, "allowed_local_media_path": null, "download_dir": null, "load_format": "dummy", "config_format": "auto", "dtype": "auto", "kv_cache_dtype": "auto", "seed": 0, "max_model_len": null, "distributed_executor_backend": null, "pipeline_parallel_size": 1, "tensor_parallel_size": 1, "max_parallel_loading_workers": null, "block_size": null, "enable_prefix_caching": false, "disable_sliding_window": false, "use_v2_block_manager": true, "swap_space": 4, "cpu_offload_gb": 0, "gpu_memory_utilization": 0.9, "max_num_batched_tokens": 8000, "max_num_partial_prefills": 1, "max_long_partial_prefills": 1, "long_prefill_token_threshold": 0, "max_num_seqs": 256, "max_logprobs": 20, "disable_log_stats": false, "revision": null, "code_revision": null, "rope_scaling": null, "rope_theta": null, "hf_overrides": null, "tokenizer_revision": null, "quantization": null, "enforce_eager": true, "max_seq_len_to_capture": 8192, "disable_custom_all_reduce": false, "tokenizer_pool_size": 0, "tokenizer_pool_type": "ray", "tokenizer_pool_extra_config": null, "limit_mm_per_prompt": null, "mm_processor_kwargs": null, "disable_mm_preprocessor_cache": false, "enable_lora": false, "enable_lora_bias": false, "max_loras": 1, "max_lora_rank": 16, "enable_prompt_adapter": false, "max_prompt_adapters": 1, "max_prompt_adapter_token": 0, "fully_sharded_loras": false, "lora_extra_vocab_size": 256, "long_lora_scaling_factors": null, "lora_dtype": "auto", "max_cpu_loras": null, "device": "auto", "num_scheduler_steps": 1, "multi_step_stream_outputs": true, "ray_workers_use_nsight": false, "num_gpu_blocks_override": null, "num_lookahead_slots": 0, "model_loader_extra_config": null, "ignore_patterns": [], "preemption_mode": null, "scheduler_delay_factor": 0.0, "enable_chunked_prefill": null, "guided_decoding_backend": "xgrammar", "logits_processor_pattern": null, "speculative_model": null, "speculative_model_quantization": null, "speculative_draft_tensor_parallel_size": null, "num_speculative_tokens": null, "speculative_disable_mqa_scorer": false, "speculative_max_model_len": null, "speculative_disable_by_batch_size": null, "ngram_prompt_lookup_max": null, "ngram_prompt_lookup_min": null, "spec_decoding_acceptance_method": "rejection_sampler", "typical_acceptance_sampler_posterior_threshold": null, "typical_acceptance_sampler_posterior_alpha": null, "qlora_adapter_name_or_path": null, "disable_logprobs_during_spec_decoding": null, "otlp_traces_endpoint": null, "collect_detailed_traces": null, "disable_async_output_proc": false, "scheduling_policy": "fcfs", "scheduler_cls": "vllm.core.scheduler.Scheduler", "override_neuron_config": null, "override_pooler_config": null, "compilation_config": null, "worker_cls": "auto", "kv_transfer_config": null, "generation_config": null, "override_generation_config": null, "enable_sleep_mode": false, "model_impl": "auto", "calculate_kv_scales": false, "additional_config": null }, "prompt_len": 0, "batch_size": 32, "num_steps": 2, "complete_num_requests_per_step": null, "save_chrome_traces_folder": null }, "prefill": { "metadata": { "num_running_seqs": null }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", 
"cuda_time_us": 88256.21899999998, "pct_cuda_time": 99.32397382916119, "invocations": 1 }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 118.078, "pct_cuda_time": 0.1328855497627844, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cuda_time_us": 118.078, "pct_cuda_time": 0.1328855497627844, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 88093.56499999999, "pct_cuda_time": 99.14092223435847, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 2823.1910000000016, "pct_cuda_time": 3.177232745476255, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 65.119, "pct_cuda_time": 0.07328523615747859, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 2758.0720000000015, "pct_cuda_time": 3.1039475093187763, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 19399.708000000002, "pct_cuda_time": 21.832524795622273, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 8855.436999999996, "pct_cuda_time": 9.96595144002017, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 33.569, "pct_cuda_time": 0.037778714239628974, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 8821.868000000002, "pct_cuda_time": 9.928172725780547, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 1754.8890000000001, "pct_cuda_time": 1.9749605306463773, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 1754.8890000000001, "pct_cuda_time": 1.9749605306463773, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 2571.643, "pct_cuda_time": 2.8941394150359607, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 723.9259999999999, "pct_cuda_time": 0.8147098062092298, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 1799.0179999999998, "pct_cuda_time": 2.0246235197339457, "invocations": 32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 48.69900000000001, "pct_cuda_time": 0.05480608909278476, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 6217.739, "pct_cuda_time": 6.997473409919758, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 34.464000000000006, "pct_cuda_time": 0.038785951549184454, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 6183.275, "pct_cuda_time": 6.958687458370575, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 65870.66600000001, "pct_cuda_time": 74.13116469325998, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 41256.30500000001, "pct_cuda_time": 46.43004430212327, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 33.532000000000004, "pct_cuda_time": 0.03773707426146858, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 41222.77300000001, "pct_cuda_time": 46.3923072278618, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 5635.51, "pct_cuda_time": 6.342230089802242, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 5635.51, "pct_cuda_time": 6.342230089802242, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 18978.850999999995, "pct_cuda_time": 21.358890301334455, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 37.215, "pct_cuda_time": 0.041881940195650516, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 18941.636000000002, "pct_cuda_time": 21.31700836113881, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 44.576, "pct_cuda_time": 0.05016604503993866, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, 
int, int)", "cuda_time_us": 44.576, "pct_cuda_time": 0.05016604503993866, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 408.219, "pct_cuda_time": 0.4594116282339985, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cuda_time_us": 4.576, "pct_cuda_time": 0.005149852434107127, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 1.408, "pct_cuda_time": 0.00158456997972527, "invocations": 1 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 402.235, "pct_cuda_time": 0.45267720582016613, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 192.477, "pct_cuda_time": 0.21661454260481589, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 17.888, "pct_cuda_time": 0.020131241333327863, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 13.952, "pct_cuda_time": 0.01570164798091404, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 23.231, "pct_cuda_time": 0.02614427926065181, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 40.48, "pct_cuda_time": 0.04555638691710151, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 36.223, "pct_cuda_time": 0.04076553861902589, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 2.176, "pct_cuda_time": 0.0024488808777572355, "invocations": 1 }, "children": [] 
}, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cuda_time_us": 22.464, "pct_cuda_time": 0.025281093767434985, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 32.895, "pct_cuda_time": 0.037020191394220706, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 3.168, "pct_cuda_time": 0.0035652824543818576, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 85270.357, "cuda_time_us": 88256.21899999998, "pct_cuda_time": 99.32397382916119, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 429.6, "cuda_time_us": 118.078, "pct_cuda_time": 0.1328855497627844, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 118.078, "pct_cuda_time": 0.1328855497627844, "trace": "index_select(bfloat16[128256, 4096], 0, int64[4096]) <- embedding(bfloat16[128256, 4096], int64[4096], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 4870.876, "cuda_time_us": 2724.732, "pct_cuda_time": 3.066426512781814, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 335.291, "cuda_time_us": 65.119, "pct_cuda_time": 0.07328523615747859, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 65.119, "pct_cuda_time": 0.07328523615747859, "trace": "_C::rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 3603.857, "cuda_time_us": 595.8, "pct_cuda_time": 0.6705161888638607, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 509.481, "cuda_time_us": 272.477, "pct_cuda_time": 0.30664692781647895, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", 
"cpu_time_us": 0, "cuda_time_us": 271.741, "pct_cuda_time": 0.3058186298725316, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 1168.231, "cuda_time_us": 53.791, "pct_cuda_time": 0.0605366504115071, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 53.791, "pct_cuda_time": 0.0605366504115071, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1254.507, "cuda_time_us": 77.247, "pct_cuda_time": 0.08693414575556671, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 21.856, "pct_cuda_time": 0.02459684763982635, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 54.111, "pct_cuda_time": 0.06089677995235375, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.001440518163386609, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- 
_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 318.581, "cuda_time_us": 192.285, "pct_cuda_time": 0.2163984648803079, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.704, "pct_cuda_time": 0.000792284989862635, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 191.581, "pct_cuda_time": 0.21560617989044525, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 157.785, "cuda_time_us": 42.719, "pct_cuda_time": 0.04807616829821293, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 42.719, "pct_cuda_time": 0.04807616829821293, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 640.309, "cuda_time_us": 2021.094, "pct_cuda_time": 2.274548919462262, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 195.079, "cuda_time_us": 1264.3670000000002, "pct_cuda_time": 1.422924709911435, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1263.631, "pct_cuda_time": 1.4220964119674877, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 181.049, "cuda_time_us": 174.206, "pct_cuda_time": 0.1960522712272872, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 174.206, "pct_cuda_time": 0.1960522712272872, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": 
"RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 180.336, "cuda_time_us": 582.5210000000001, "pct_cuda_time": 0.6555719383235398, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0008643108980319654, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 581.753, "pct_cuda_time": 0.6547076274255078, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2575.859, "cuda_time_us": 2695.324, "pct_cuda_time": 3.033330607978007, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.866, "cuda_time_us": 43.52, "pct_cuda_time": 0.04897761755514471, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.52, "pct_cuda_time": 0.04897761755514471, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1865.058, "cuda_time_us": 591.0, "pct_cuda_time": 0.6651142457511608, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 181.285, "cuda_time_us": 270.748, "pct_cuda_time": 0.30470110289109187, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 270.012, "pct_cuda_time": 0.30387280494714464, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 544.63, "cuda_time_us": 53.695, "pct_cuda_time": 0.0604286115492531, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 53.695, "pct_cuda_time": 0.0604286115492531, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 772.617, "cuda_time_us": 77.631, "pct_cuda_time": 0.08736630120458269, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, 
(vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.208, "pct_cuda_time": 0.024992990134757663, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.919, "pct_cuda_time": 0.06068070222784575, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0016926088419792658, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 205.415, "cuda_time_us": 188.926, "pct_cuda_time": 0.2126182301062332, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0008643108980319654, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 188.158, "pct_cuda_time": 0.21175391920820122, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.934, "cuda_time_us": 43.487, "pct_cuda_time": 0.0489404791962449, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.487, "pct_cuda_time": 0.0489404791962449, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 470.459, "cuda_time_us": 2017.317, "pct_cuda_time": 2.270298265475456, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.866, "cuda_time_us": 1261.903, "pct_cuda_time": 1.4201517124469158, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1261.167, "pct_cuda_time": 1.4193234145029683, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.002, "cuda_time_us": 173.918, "pct_cuda_time": 0.1957281546405252, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 173.918, "pct_cuda_time": 0.1957281546405252, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.829, "cuda_time_us": 581.496, "pct_cuda_time": 0.6544183983880153, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.000829423348762446, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 580.759, "pct_cuda_time": 0.653588975039253, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2555.169, "cuda_time_us": 
2699.4210000000003, "pct_cuda_time": 3.0379413915056594, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.618, "cuda_time_us": 43.776, "pct_cuda_time": 0.049265721187822034, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.776, "pct_cuda_time": 0.049265721187822034, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1827.476, "cuda_time_us": 592.569, "pct_cuda_time": 0.6668800059061246, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.151, "cuda_time_us": 271.452, "pct_cuda_time": 0.30549338788095454, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 270.716, "pct_cuda_time": 0.30466508993700725, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 515.784, "cuda_time_us": 54.4, "pct_cuda_time": 0.061222021943930886, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.4, "pct_cuda_time": 0.061222021943930886, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 777.473, "cuda_time_us": 78.367, "pct_cuda_time": 0.08819459914852999, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.432, "pct_cuda_time": 0.025245080813350324, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 54.271, "pct_cuda_time": 0.06107684472277708, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.664, "pct_cuda_time": 0.0018726736124025916, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 227.312, "cuda_time_us": 188.35, "pct_cuda_time": 0.2119699969327092, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 187.614, "pct_cuda_time": 0.21114169898876195, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.505, "cuda_time_us": 42.687, "pct_cuda_time": 0.04804015534412826, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 42.687, "pct_cuda_time": 0.04804015534412826, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 479.856, "cuda_time_us": 2020.3890000000001, "pct_cuda_time": 2.273755509067584, "trace": "" }, "children": [ { "entry": 
{ "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 171.555, "cuda_time_us": 1265.423, "pct_cuda_time": 1.4241131373962288, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1264.687, "pct_cuda_time": 1.4232848394522815, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.121, "cuda_time_us": 172.862, "pct_cuda_time": 0.19453972715573123, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 172.862, "pct_cuda_time": 0.19453972715573123, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.808, "cuda_time_us": 582.104, "pct_cuda_time": 0.6551026445156239, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 581.368, "pct_cuda_time": 0.6542743465716767, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2515.008, "cuda_time_us": 2701.08, "pct_cuda_time": 3.039808438093986, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.694, "cuda_time_us": 44.287, "pct_cuda_time": 0.049840803048361526, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.287, "pct_cuda_time": 0.049840803048361526, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1813.567, "cuda_time_us": 592.183, "pct_cuda_time": 0.6664455996474783, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 154.644, "cuda_time_us": 270.652, "pct_cuda_time": 0.3045930640288379, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], 
bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 269.916, "pct_cuda_time": 0.3037647660848906, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 520.422, "cuda_time_us": 54.175, "pct_cuda_time": 0.06096880586052308, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.175, "pct_cuda_time": 0.06096880586052308, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 762.595, "cuda_time_us": 78.591, "pct_cuda_time": 0.08844668982712264, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 21.952, "pct_cuda_time": 0.024704886502080345, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.199, "pct_cuda_time": 0.062121220391232364, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void 
at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.001620582933809935, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 219.166, "cuda_time_us": 188.765, "pct_cuda_time": 0.21243703993099472, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 188.029, "pct_cuda_time": 0.21160874198704743, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.882, "cuda_time_us": 42.784, "pct_cuda_time": 0.0481493196111974, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 42.784, "pct_cuda_time": 0.0481493196111974, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 465.732, "cuda_time_us": 2021.826, "pct_cuda_time": 2.2753727157869488, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.554, "cuda_time_us": 1265.453, "pct_cuda_time": 1.4241468995406832, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0008271725391321543, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1264.718, "pct_cuda_time": 1.4233197270015512, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.377, "cuda_time_us": 173.021, "pct_cuda_time": 0.19471866652133943, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 173.021, "pct_cuda_time": 0.19471866652133943, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.924, "cuda_time_us": 583.352, "pct_cuda_time": 0.6565071497249259, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 582.616, "pct_cuda_time": 0.6556788517809785, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2434.6, "cuda_time_us": 2697.661, "pct_cuda_time": 3.0359606790310023, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.27, "cuda_time_us": 44.448, "pct_cuda_time": 0.0500219932236, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.448, "pct_cuda_time": 0.0500219932236, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1755.568, "cuda_time_us": 590.36, "pct_cuda_time": 0.6643939866694676, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.346, "cuda_time_us": 270.94, "pct_cuda_time": 0.3049171806155999, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 270.204, "pct_cuda_time": 0.30408888267165257, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 506.935, "cuda_time_us": 53.407, "pct_cuda_time": 0.060104494962491116, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 53.407, "pct_cuda_time": 0.060104494962491116, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] 
} ] }, { "entry": { "name": "Attention", "cpu_time_us": 729.212, "cuda_time_us": 78.143, "pct_cuda_time": 0.08794250846993734, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.368, "pct_cuda_time": 0.02517305490518099, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 54.335, "pct_cuda_time": 0.061148870630946414, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.001620582933809935, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 201.727, "cuda_time_us": 187.86999999999998, "pct_cuda_time": 0.21142980262143923, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- 
matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 187.134, "pct_cuda_time": 0.21060150467749192, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.647, "cuda_time_us": 42.751, "pct_cuda_time": 0.04811218125229759, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 42.751, "pct_cuda_time": 0.04811218125229759, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 462.727, "cuda_time_us": 2020.1020000000003, "pct_cuda_time": 2.2734325178856376, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.89, "cuda_time_us": 1264.4630000000002, "pct_cuda_time": 1.4230327487736891, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1263.727, "pct_cuda_time": 1.4222044508297418, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.303, "cuda_time_us": 173.438, "pct_cuda_time": 0.1951879603292552, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 173.438, "pct_cuda_time": 0.1951879603292552, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.605, "cuda_time_us": 582.201, "pct_cuda_time": 0.6552118087826931, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.000829423348762446, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 581.464, "pct_cuda_time": 0.6543823854339308, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- 
linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2835.438, "cuda_time_us": 2697.7219999999998, "pct_cuda_time": 3.036029328724726, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.855, "cuda_time_us": 43.327, "pct_cuda_time": 0.04876041442582157, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.327, "pct_cuda_time": 0.04876041442582157, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2056.016, "cuda_time_us": 589.848, "pct_cuda_time": 0.6638177794041129, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.779, "cuda_time_us": 268.765, "pct_cuda_time": 0.30246942514265773, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 268.029, "pct_cuda_time": 0.3016411271987105, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 732.952, "cuda_time_us": 53.855, "pct_cuda_time": 0.06060867631967643, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 53.855, "pct_cuda_time": 0.06060867631967643, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 751.338, "cuda_time_us": 78.11, "pct_cuda_time": 0.08790537011103752, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.912, "pct_cuda_time": 0.025785275124620297, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, 
false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.695, "pct_cuda_time": 0.0604286115492531, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.503, "pct_cuda_time": 0.0016914834371641197, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 252.071, "cuda_time_us": 189.118, "pct_cuda_time": 0.21283430783074117, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.000829423348762446, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 188.381, "pct_cuda_time": 0.21200488448197874, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 95.395, "cuda_time_us": 43.071, "pct_cuda_time": 0.04847231079314424, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.071, "pct_cuda_time": 0.04847231079314424, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } 
] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 506.606, "cuda_time_us": 2021.4759999999999, "pct_cuda_time": 2.2749788241016473, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 188.087, "cuda_time_us": 1265.359, "pct_cuda_time": 1.4240411114880593, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.769, "pct_cuda_time": 0.0008654363028471112, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1264.59, "pct_cuda_time": 1.4231756751852125, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.564, "cuda_time_us": 174.109, "pct_cuda_time": 0.19594310696021805, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 174.109, "pct_cuda_time": 0.19594310696021805, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 152.709, "cuda_time_us": 582.008, "pct_cuda_time": 0.65499460565337, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 581.272, "pct_cuda_time": 0.6541663077094227, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2427.192, "cuda_time_us": 2703.325, "pct_cuda_time": 3.042334971903988, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.827, "cuda_time_us": 43.52, "pct_cuda_time": 0.04897761755514471, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.52, "pct_cuda_time": 0.04897761755514471, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1748.293, "cuda_time_us": 591.5759999999999, "pct_cuda_time": 0.6657624789246848, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 146.728, "cuda_time_us": 270.428, "pct_cuda_time": 0.30434097335024524, 
"trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 269.692, "pct_cuda_time": 0.30351267540629795, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 537.799, "cuda_time_us": 53.631, "pct_cuda_time": 0.06035658564108377, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 53.631, "pct_cuda_time": 0.06035658564108377, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 727.099, "cuda_time_us": 78.23899999999999, "pct_cuda_time": 0.08805054733219132, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.464, "pct_cuda_time": 0.025281093767434985, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 54.303, "pct_cuda_time": 0.06111285767686174, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0016565958878946004, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 192.162, "cuda_time_us": 189.278, "pct_cuda_time": 0.21301437260116451, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 188.542, "pct_cuda_time": 0.21218607465721723, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.713, "cuda_time_us": 43.584, "pct_cuda_time": 0.049049643463314044, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.584, "pct_cuda_time": 0.049049643463314044, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 460.427, "cuda_time_us": 2024.645, "pct_cuda_time": 2.2785452319608446, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.888, "cuda_time_us": 1268.047, "pct_cuda_time": 1.4270661996311713, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0008643108980319654, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1267.279, "pct_cuda_time": 1.4262018887331394, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { 
"name": "SiluAndMul", "cpu_time_us": 95.115, "cuda_time_us": 173.469, "pct_cuda_time": 0.19522284787852473, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 173.469, "pct_cuda_time": 0.19522284787852473, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.88, "cuda_time_us": 583.129, "pct_cuda_time": 0.6562561844511484, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0015125440715559396, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 581.785, "pct_cuda_time": 0.6547436403795924, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2514.651, "cuda_time_us": 2699.708, "pct_cuda_time": 3.038264382687606, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.205, "cuda_time_us": 43.327, "pct_cuda_time": 0.04876041442582157, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.327, "pct_cuda_time": 0.04876041442582157, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1798.769, "cuda_time_us": 590.9369999999999, "pct_cuda_time": 0.6650433452478066, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 150.829, "cuda_time_us": 270.3, "pct_cuda_time": 0.3041969215339066, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0014765311174712744, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 268.988, "pct_cuda_time": 0.30272039041643534, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 516.124, "cuda_time_us": 53.984, "pct_cuda_time": 0.06075385354083024, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 
53.984, "pct_cuda_time": 0.06075385354083024, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 755.851, "cuda_time_us": 77.727, "pct_cuda_time": 0.0874743400668367, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.016, "pct_cuda_time": 0.024776912410249673, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 54.271, "pct_cuda_time": 0.06107684472277708, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.001620582933809935, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 211.814, "cuda_time_us": 188.926, "pct_cuda_time": 0.2126182301062332, "trace": "" }, "children": [ { "entry": { 
"name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.001440518163386609, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 187.646, "pct_cuda_time": 0.21117771194284657, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.286, "cuda_time_us": 42.975, "pct_cuda_time": 0.048364271930890254, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 42.975, "pct_cuda_time": 0.048364271930890254, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 468.746, "cuda_time_us": 2022.469, "pct_cuda_time": 2.2760963510830874, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.908, "cuda_time_us": 1264.9430000000002, "pct_cuda_time": 1.423572943084959, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1264.207, "pct_cuda_time": 1.4227446451410117, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.213, "cuda_time_us": 173.086, "pct_cuda_time": 0.19479181783432392, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 173.086, "pct_cuda_time": 0.19479181783432392, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 156.02, "cuda_time_us": 584.4399999999999, "pct_cuda_time": 0.6577315901638044, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.824, "pct_cuda_time": 0.002052738382825918, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 582.616, 
"pct_cuda_time": 0.6556788517809785, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2599.859, "cuda_time_us": 2704.891, "pct_cuda_time": 3.0440973558445066, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.986, "cuda_time_us": 44.255, "pct_cuda_time": 0.049804790094276866, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.255, "pct_cuda_time": 0.049804790094276866, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1816.855, "cuda_time_us": 591.703, "pct_cuda_time": 0.6659054053362083, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 162.82, "cuda_time_us": 270.49199999999996, "pct_cuda_time": 0.30441299925841453, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.001440518163386609, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 269.212, "pct_cuda_time": 0.30297248109502795, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 542.298, "cuda_time_us": 54.175, "pct_cuda_time": 0.06096880586052308, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.175, "pct_cuda_time": 0.06096880586052308, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 746.594, "cuda_time_us": 77.983, "pct_cuda_time": 0.08776244369951403, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.208, "pct_cuda_time": 0.024992990134757663, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, 
cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 54.271, "pct_cuda_time": 0.06107684472277708, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0016926088419792658, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 190.735, "cuda_time_us": 189.053, "pct_cuda_time": 0.21276115651775673, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 188.317, "pct_cuda_time": 0.21193285857380942, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.758, "cuda_time_us": 43.392, "pct_cuda_time": 0.048833565738806047, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 
43.392, "pct_cuda_time": 0.048833565738806047, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 566.976, "cuda_time_us": 2025.5410000000002, "pct_cuda_time": 2.2795535946752157, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 182.639, "cuda_time_us": 1266.7350000000001, "pct_cuda_time": 1.4255896685137004, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1265.999, "pct_cuda_time": 1.4247613705697528, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.377, "cuda_time_us": 174.046, "pct_cuda_time": 0.19587220645686387, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 174.046, "pct_cuda_time": 0.19587220645686387, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 207.861, "cuda_time_us": 584.76, "pct_cuda_time": 0.6580917197046512, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.001440518163386609, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 583.48, "pct_cuda_time": 0.6566512015412646, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2435.579, "cuda_time_us": 2714.8740000000003, "pct_cuda_time": 3.055332272114107, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.181, "cuda_time_us": 43.808, "pct_cuda_time": 0.049301734141906695, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.808, "pct_cuda_time": 0.049301734141906695, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1762.291, "cuda_time_us": 590.903, "pct_cuda_time": 0.6650050814840918, "trace": "" }, 
"children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.384, "cuda_time_us": 270.78, "pct_cuda_time": 0.30473711584517654, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0008643108980319654, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 270.012, "pct_cuda_time": 0.30387280494714464, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 520.771, "cuda_time_us": 53.343, "pct_cuda_time": 0.060032469054321795, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 53.343, "pct_cuda_time": 0.060032469054321795, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 760.655, "cuda_time_us": 77.598, "pct_cuda_time": 0.08732916284568287, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.143, "pct_cuda_time": 0.02491983882177319, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.951, "pct_cuda_time": 0.06071671518193043, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], 
None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0016926088419792658, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 189.171, "cuda_time_us": 189.182, "pct_cuda_time": 0.2129063337389105, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 188.446, "pct_cuda_time": 0.21207803579496323, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.486, "cuda_time_us": 42.879, "pct_cuda_time": 0.04825623306863625, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 42.879, "pct_cuda_time": 0.04825623306863625, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 455.931, "cuda_time_us": 2037.284, "pct_cuda_time": 2.292769223419472, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.25, "cuda_time_us": 1274.383, "pct_cuda_time": 1.4341967645399352, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.568, "pct_cuda_time": 0.0017646347501485962, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1272.815, "pct_cuda_time": 1.4324321297897866, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 
4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.973, "cuda_time_us": 175.997, "pct_cuda_time": 0.1980678712512133, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 175.997, "pct_cuda_time": 0.1980678712512133, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 157.806, "cuda_time_us": 586.904, "pct_cuda_time": 0.6605045876283238, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.343, "pct_cuda_time": 0.0015114186667407937, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 585.561, "pct_cuda_time": 0.658993168961583, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2518.675, "cuda_time_us": 2728.826, "pct_cuda_time": 3.071033920095021, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.875, "cuda_time_us": 43.903, "pct_cuda_time": 0.04940864759934554, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.903, "pct_cuda_time": 0.04940864759934554, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1822.853, "cuda_time_us": 604.952, "pct_cuda_time": 0.680815893732075, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 142.506, "cuda_time_us": 277.18, "pct_cuda_time": 0.31193970666210963, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.664, "pct_cuda_time": 0.0018726736124025916, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 275.516, "pct_cuda_time": 0.31006703304970706, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 504.141, "cuda_time_us": 54.847, "pct_cuda_time": 0.06172507789630105, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, 
c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.847, "pct_cuda_time": 0.06172507789630105, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 813.061, "cuda_time_us": 80.319, "pct_cuda_time": 0.09039138934769457, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.368, "pct_cuda_time": 0.02517305490518099, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.415, "pct_cuda_time": 0.06348971264644965, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.536, "pct_cuda_time": 0.0017286217960639308, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", 
"cpu_time_us": 223.842, "cuda_time_us": 192.606, "pct_cuda_time": 0.2167597198259697, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 191.87, "pct_cuda_time": 0.21593142188202238, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.63, "cuda_time_us": 42.751, "pct_cuda_time": 0.04811218125229759, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 42.751, "pct_cuda_time": 0.04811218125229759, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 475.07, "cuda_time_us": 2037.2199999999998, "pct_cuda_time": 2.2926971975113024, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 176.259, "cuda_time_us": 1273.9019999999998, "pct_cuda_time": 1.4336554448238499, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0014765311174712744, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1272.59, "pct_cuda_time": 1.4321789137063787, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.257, "cuda_time_us": 175.326, "pct_cuda_time": 0.19731272462025048, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 175.326, "pct_cuda_time": 0.19731272462025048, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 147.212, "cuda_time_us": 587.9920000000001, "pct_cuda_time": 0.6617290280672025, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.984, "pct_cuda_time": 0.0022328031532492442, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 586.008, "pct_cuda_time": 0.6594962249139531, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2549.993, "cuda_time_us": 2733.786, "pct_cuda_time": 3.0766159279781444, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.487, "cuda_time_us": 43.967, "pct_cuda_time": 0.04948067350751487, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.967, "pct_cuda_time": 0.04948067350751487, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1848.388, "cuda_time_us": 604.472, "pct_cuda_time": 0.6802756994208049, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 157.306, "cuda_time_us": 274.877, "pct_cuda_time": 0.30934789937282886, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.0014045052093019437, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 273.629, "pct_cuda_time": 0.3079433941635269, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 532.393, "cuda_time_us": 55.135, "pct_cuda_time": 0.062049194483063036, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 55.135, "pct_cuda_time": 0.062049194483063036, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 722.763, "cuda_time_us": 79.999, "pct_cuda_time": 0.09003125980684791, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.624, "pct_cuda_time": 0.025461158537858315, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], 
bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.903, "pct_cuda_time": 0.062913505381095, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0016565958878946004, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 198.873, "cuda_time_us": 194.461, "pct_cuda_time": 0.21884734575806516, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.888, "pct_cuda_time": 0.002124764290995248, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 192.573, "pct_cuda_time": 0.2167225814670699, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.211, "cuda_time_us": 43.487, "pct_cuda_time": 0.0489404791962449, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, 
void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.487, "pct_cuda_time": 0.0489404791962449, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 482.421, "cuda_time_us": 2041.8600000000001, "pct_cuda_time": 2.2979190758535792, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.62, "cuda_time_us": 1275.598, "pct_cuda_time": 1.4355641313903373, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.0008631854932168196, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1274.831, "pct_cuda_time": 1.4347009458971205, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 117.788, "cuda_time_us": 176.19, "pct_cuda_time": 0.19828507438053644, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 176.19, "pct_cuda_time": 0.19828507438053644, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.266, "cuda_time_us": 590.072, "pct_cuda_time": 0.6640698700827056, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.6, "pct_cuda_time": 0.0018006477042332614, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 588.472, "pct_cuda_time": 0.6622692223784723, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2697.515, "cuda_time_us": 2728.8289999999997, "pct_cuda_time": 3.071037296309466, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.578, "cuda_time_us": 43.776, "pct_cuda_time": 0.049265721187822034, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.776, "pct_cuda_time": 0.049265721187822034, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { 
"entry": { "name": "LlamaAttention", "cpu_time_us": 2001.529, "cuda_time_us": 602.713, "pct_cuda_time": 0.6782961123509634, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.9, "cuda_time_us": 275.325, "pct_cuda_time": 0.3098520807300142, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.0014045052093019437, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 274.077, "pct_cuda_time": 0.3084475755207122, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 519.926, "cuda_time_us": 54.943, "pct_cuda_time": 0.061833116758555046, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.943, "pct_cuda_time": 0.061833116758555046, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 992.694, "cuda_time_us": 79.52000000000001, "pct_cuda_time": 0.08949219090039309, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.4, "pct_cuda_time": 0.025209067859265657, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.488, "pct_cuda_time": 0.0624464623828095, "trace": 
"_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.632, "pct_cuda_time": 0.0018366606583179264, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 207.238, "cuda_time_us": 192.92499999999998, "pct_cuda_time": 0.2171187239620012, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 192.189, "pct_cuda_time": 0.21629042601805387, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.302, "cuda_time_us": 43.168, "pct_cuda_time": 0.04858147506021339, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.168, "pct_cuda_time": 0.04858147506021339, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 475.256, "cuda_time_us": 2039.172, "pct_cuda_time": 2.2948939877104677, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.466, "cuda_time_us": 1274.031, "pct_cuda_time": 1.4338006220450037, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0008271725391321543, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", 
"cpu_time_us": 0, "cuda_time_us": 1273.296, "pct_cuda_time": 1.4329734495058717, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 105.933, "cuda_time_us": 176.765, "pct_cuda_time": 0.19893218214924527, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 176.765, "pct_cuda_time": 0.19893218214924527, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.214, "cuda_time_us": 588.3760000000001, "pct_cuda_time": 0.6621611835162184, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0015125440715559396, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 587.032, "pct_cuda_time": 0.6606486394446625, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2475.376, "cuda_time_us": 2730.2690000000002, "pct_cuda_time": 3.072657879243277, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.95, "cuda_time_us": 43.711, "pct_cuda_time": 0.04919256987483755, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.711, "pct_cuda_time": 0.04919256987483755, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1813.341, "cuda_time_us": 603.257, "pct_cuda_time": 0.6789083325704027, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.008, "cuda_time_us": 274.84499999999997, "pct_cuda_time": 0.30931188641874413, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0008643108980319654, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 274.077, "pct_cuda_time": 0.3084475755207122, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": 
"Llama3RotaryEmbedding", "cpu_time_us": 537.693, "cuda_time_us": 54.719, "pct_cuda_time": 0.061581026079962395, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.719, "pct_cuda_time": 0.061581026079962395, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 779.586, "cuda_time_us": 80.48, "pct_cuda_time": 0.09057257952293304, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 23.232, "pct_cuda_time": 0.026145404665466953, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.744, "pct_cuda_time": 0.06273456601548683, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0016926088419792658, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 200.372, "cuda_time_us": 193.213, "pct_cuda_time": 0.21744284054876317, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 192.477, "pct_cuda_time": 0.21661454260481589, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.882, "cuda_time_us": 43.68, "pct_cuda_time": 0.049157682325568025, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.68, "pct_cuda_time": 0.049157682325568025, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 443.152, "cuda_time_us": 2039.621, "pct_cuda_time": 2.295399294472468, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 154.786, "cuda_time_us": 1276.4950000000001, "pct_cuda_time": 1.4365736195095231, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.728, "pct_cuda_time": 0.0019446995205719222, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1274.767, "pct_cuda_time": 1.434628919988951, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.234, "cuda_time_us": 176.03, "pct_cuda_time": 0.19810500961011313, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 176.03, "pct_cuda_time": 0.19810500961011313, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.814, "cuda_time_us": 587.096, "pct_cuda_time": 0.6607206653528317, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 
14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 586.36, "pct_cuda_time": 0.6598923674088845, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2469.798, "cuda_time_us": 2734.492, "pct_cuda_time": 3.077410463777637, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.734, "cuda_time_us": 43.519, "pct_cuda_time": 0.04897649215032956, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.519, "pct_cuda_time": 0.04897649215032956, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1792.862, "cuda_time_us": 607.385, "pct_cuda_time": 0.6835540036473247, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 145.071, "cuda_time_us": 276.702, "pct_cuda_time": 0.3114017631604699, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.665, "pct_cuda_time": 0.0018737990172177374, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 275.037, "pct_cuda_time": 0.3095279641432522, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 528.265, "cuda_time_us": 54.655, "pct_cuda_time": 0.06150900017179306, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.655, "pct_cuda_time": 0.06150900017179306, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 785.65, "cuda_time_us": 81.279, "pct_cuda_time": 0.09147177797023452, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.592, "pct_cuda_time": 0.025425145583773647, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], 
bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.992, "pct_cuda_time": 0.06413907122478876, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.695, "pct_cuda_time": 0.0019075611616721111, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 193.911, "cuda_time_us": 194.749, "pct_cuda_time": 0.21917146234482712, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.376, "pct_cuda_time": 0.0015485570256406045, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 193.373, "pct_cuda_time": 0.2176229053191865, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", 
"cpu_time_us": 75.502, "cuda_time_us": 43.168, "pct_cuda_time": 0.04858147506021339, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.168, "pct_cuda_time": 0.04858147506021339, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 452.492, "cuda_time_us": 2040.42, "pct_cuda_time": 2.296298492919769, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 155.801, "cuda_time_us": 1276.654, "pct_cuda_time": 1.4367525588751313, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0008271725391321543, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1275.919, "pct_cuda_time": 1.435925386335999, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 105.812, "cuda_time_us": 176.222, "pct_cuda_time": 0.19832108733462114, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 176.222, "pct_cuda_time": 0.19832108733462114, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.34, "cuda_time_us": 587.544, "pct_cuda_time": 0.661224846710017, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0008643108980319654, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 586.776, "pct_cuda_time": 0.660360535811985, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2453.177, "cuda_time_us": 2730.041, "pct_cuda_time": 3.072401286945423, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.08, "cuda_time_us": 43.328, "pct_cuda_time": 0.04876153983063672, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 
43.328, "pct_cuda_time": 0.04876153983063672, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1751.366, "cuda_time_us": 604.792, "pct_cuda_time": 0.6806358289616516, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 142.13, "cuda_time_us": 275.934, "pct_cuda_time": 0.31053745226243795, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.185, "pct_cuda_time": 0.0013336047059477592, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 274.749, "pct_cuda_time": 0.3092038475564902, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 503.821, "cuda_time_us": 54.111, "pct_cuda_time": 0.06089677995235375, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.111, "pct_cuda_time": 0.06089677995235375, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 738.551, "cuda_time_us": 81.022, "pct_cuda_time": 0.09118254893274207, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 23.104, "pct_cuda_time": 0.026001352849128294, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.447, "pct_cuda_time": 0.06352572560053432, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.471, "pct_cuda_time": 0.0016554704830794547, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 210.141, "cuda_time_us": 193.72500000000002, "pct_cuda_time": 0.21801904781411788, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.824, "pct_cuda_time": 0.002052738382825918, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 191.901, "pct_cuda_time": 0.21596630943129194, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.607, "cuda_time_us": 44.095, "pct_cuda_time": 0.04962472532385353, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.095, "pct_cuda_time": 0.04962472532385353, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 473.298, "cuda_time_us": 2037.826, "pct_cuda_time": 2.2933791928292813, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 166.131, "cuda_time_us": 1272.365, "pct_cuda_time": 1.4319256976229708, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.215, "pct_cuda_time": 0.001367366850402133, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 
4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1271.15, "pct_cuda_time": 1.4305583307725689, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.063, "cuda_time_us": 176.189, "pct_cuda_time": 0.19828394897572127, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 176.189, "pct_cuda_time": 0.19828394897572127, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.152, "cuda_time_us": 589.272, "pct_cuda_time": 0.6631695462305891, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.6, "pct_cuda_time": 0.0018006477042332614, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 587.672, "pct_cuda_time": 0.6613688985263557, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2501.354, "cuda_time_us": 2731.641, "pct_cuda_time": 3.0742019346496563, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.506, "cuda_time_us": 44.511, "pct_cuda_time": 0.05009289372695419, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.511, "pct_cuda_time": 0.05009289372695419, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1804.408, "cuda_time_us": 603.958, "pct_cuda_time": 0.67969724134582, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 153.476, "cuda_time_us": 274.619, "pct_cuda_time": 0.3090575449305213, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0008271725391321543, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 273.884, "pct_cuda_time": 0.30823037239138906, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], 
bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 548.861, "cuda_time_us": 54.847, "pct_cuda_time": 0.06172507789630105, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.847, "pct_cuda_time": 0.06172507789630105, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 762.153, "cuda_time_us": 80.22299999999998, "pct_cuda_time": 0.09028335048544056, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.719, "pct_cuda_time": 0.025568071995297162, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.16, "pct_cuda_time": 0.06320273441858747, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0015125440715559396, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, 
None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 198.51, "cuda_time_us": 194.269, "pct_cuda_time": 0.21863126803355715, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0015125440715559396, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 192.925, "pct_cuda_time": 0.21711872396200124, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.805, "cuda_time_us": 43.071, "pct_cuda_time": 0.04847231079314424, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.071, "pct_cuda_time": 0.04847231079314424, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 463.457, "cuda_time_us": 2040.101, "pct_cuda_time": 2.295939488783738, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 161.33, "cuda_time_us": 1272.912, "pct_cuda_time": 1.4325412940568558, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.000829423348762446, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1272.175, "pct_cuda_time": 1.4317118707080931, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.777, "cuda_time_us": 176.285, "pct_cuda_time": 0.1983919878379753, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 176.285, "pct_cuda_time": 0.1983919878379753, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 153.786, "cuda_time_us": 590.904, "pct_cuda_time": 0.6650062068889069, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, 
"cuda_time_us": 1.312, "pct_cuda_time": 0.0014765311174712744, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 589.592, "pct_cuda_time": 0.6635296757714356, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2363.441, "cuda_time_us": 2729.338, "pct_cuda_time": 3.0716101273603758, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.474, "cuda_time_us": 43.583, "pct_cuda_time": 0.04904851805849889, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.583, "pct_cuda_time": 0.04904851805849889, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1666.131, "cuda_time_us": 604.4390000000001, "pct_cuda_time": 0.6802385610619052, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.538, "cuda_time_us": 275.74, "pct_cuda_time": 0.3103191237282997, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.0008631854932168196, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 274.973, "pct_cuda_time": 0.3094559382350829, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 488.87, "cuda_time_us": 54.944, "pct_cuda_time": 0.06183424216337019, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.944, "pct_cuda_time": 0.06183424216337019, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 701.232, "cuda_time_us": 79.998, "pct_cuda_time": 0.09003013440203278, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.624, "pct_cuda_time": 
0.025461158537858315, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.838, "pct_cuda_time": 0.06284035406811053, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.536, "pct_cuda_time": 0.0017286217960639308, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 187.454, "cuda_time_us": 193.757, "pct_cuda_time": 0.21805506076820252, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0008643108980319654, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 192.989, "pct_cuda_time": 0.21719074987017056, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], 
bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 91.094, "cuda_time_us": 43.871, "pct_cuda_time": 0.04937263464526088, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.871, "pct_cuda_time": 0.04937263464526088, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 471.816, "cuda_time_us": 2037.4450000000002, "pct_cuda_time": 2.2929504135947107, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.878, "cuda_time_us": 1273.679, "pct_cuda_time": 1.4334044795500727, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.856, "pct_cuda_time": 0.0020887513369105834, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1271.823, "pct_cuda_time": 1.431315728213162, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 109.206, "cuda_time_us": 175.454, "pct_cuda_time": 0.19745677643658913, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 175.454, "pct_cuda_time": 0.19745677643658913, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.063, "cuda_time_us": 588.312, "pct_cuda_time": 0.662089157608049, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 587.576, "pct_cuda_time": 0.6612608596641018, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2573.948, "cuda_time_us": 2734.811, "pct_cuda_time": 3.0777694679136687, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.433, "cuda_time_us": 45.151, "pct_cuda_time": 0.050813152808647497, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type 
vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 45.151, "pct_cuda_time": 0.050813152808647497, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1857.078, "cuda_time_us": 607.225, "pct_cuda_time": 0.6833739388769013, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.281, "cuda_time_us": 276.798, "pct_cuda_time": 0.31150980202272394, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.601, "pct_cuda_time": 0.001801773109048407, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 275.197, "pct_cuda_time": 0.30970802891367555, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 599.489, "cuda_time_us": 54.719, "pct_cuda_time": 0.061581026079962395, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.719, "pct_cuda_time": 0.061581026079962395, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 764.71, "cuda_time_us": 81.087, "pct_cuda_time": 0.09125570024572655, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.719, "pct_cuda_time": 0.025568071995297162, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, 
cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.896, "pct_cuda_time": 0.06403103236253477, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0016565958878946004, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 200.939, "cuda_time_us": 194.621, "pct_cuda_time": 0.21902741052848848, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0014765311174712744, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 193.309, "pct_cuda_time": 0.21755087941101717, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.911, "cuda_time_us": 42.687, "pct_cuda_time": 0.04804015534412826, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 42.687, "pct_cuda_time": 0.04804015534412826, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 498.79, "cuda_time_us": 2039.748, "pct_cuda_time": 2.2955422208839913, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 199.17, "cuda_time_us": 1275.0539999999999, "pct_cuda_time": 1.434951911170898, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0008271725391321543, "trace": "mm(bfloat16[4096, 4096], 
bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1274.319, "pct_cuda_time": 1.4341247386317657, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.093, "cuda_time_us": 175.998, "pct_cuda_time": 0.19806899665602845, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 175.998, "pct_cuda_time": 0.19806899665602845, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 153.55, "cuda_time_us": 588.696, "pct_cuda_time": 0.6625213130570651, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0015125440715559396, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 587.352, "pct_cuda_time": 0.6610087689855091, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2741.303, "cuda_time_us": 2731.132, "pct_cuda_time": 3.0736291035987473, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.138, "cuda_time_us": 43.551, "pct_cuda_time": 0.04901250510441423, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.551, "pct_cuda_time": 0.04901250510441423, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2052.812, "cuda_time_us": 605.049, "pct_cuda_time": 0.680925057999144, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.141, "cuda_time_us": 275.837, "pct_cuda_time": 0.3104282879953688, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0008643108980319654, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 275.069, 
"pct_cuda_time": 0.30956397709733685, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 493.178, "cuda_time_us": 55.008, "pct_cuda_time": 0.061906268071539525, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 55.008, "pct_cuda_time": 0.061906268071539525, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1033.347, "cuda_time_us": 80.383, "pct_cuda_time": 0.0904634152558639, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.463, "pct_cuda_time": 0.025279968362619847, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.32, "pct_cuda_time": 0.0633827991890108, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.6, "pct_cuda_time": 0.0018006477042332614, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], 
bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 239.317, "cuda_time_us": 193.821, "pct_cuda_time": 0.21812708667637185, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.76, "pct_cuda_time": 0.0019807124746565874, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 192.061, "pct_cuda_time": 0.21614637420171523, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.034, "cuda_time_us": 43.231, "pct_cuda_time": 0.04865237556356758, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.231, "pct_cuda_time": 0.04865237556356758, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 473.927, "cuda_time_us": 2039.301, "pct_cuda_time": 2.2950391649316213, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.796, "cuda_time_us": 1275.023, "pct_cuda_time": 1.4349170236216284, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.001440518163386609, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1273.743, "pct_cuda_time": 1.4334765054582417, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.233, "cuda_time_us": 175.198, "pct_cuda_time": 0.19716867280391184, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 175.198, "pct_cuda_time": 0.19716867280391184, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.662, "cuda_time_us": 
589.0799999999999, "pct_cuda_time": 0.6629534685060808, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.632, "pct_cuda_time": 0.0018366606583179264, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 587.448, "pct_cuda_time": 0.661116807847763, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2378.017, "cuda_time_us": 2730.009, "pct_cuda_time": 3.0723652739913385, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 65.518, "cuda_time_us": 44.575, "pct_cuda_time": 0.05016491963512351, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.575, "pct_cuda_time": 0.05016491963512351, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1700.608, "cuda_time_us": 604.7900000000001, "pct_cuda_time": 0.6806335781520214, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 161.199, "cuda_time_us": 276.06100000000004, "pct_cuda_time": 0.3106803786739615, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.184, "pct_cuda_time": 0.0013324793011326133, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 274.877, "pct_cuda_time": 0.30934789937282886, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 486.783, "cuda_time_us": 54.367, "pct_cuda_time": 0.061184883585031075, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.367, "pct_cuda_time": 0.061184883585031075, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 727.348, "cuda_time_us": 80.414, "pct_cuda_time": 0.09049830280513342, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, 
__nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.399, "pct_cuda_time": 0.025207942454450512, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.512, "pct_cuda_time": 0.06359887691351879, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.503, "pct_cuda_time": 0.0016914834371641197, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 190.418, "cuda_time_us": 193.948, "pct_cuda_time": 0.21827001308789537, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.407, "pct_cuda_time": 0.0015834445749101243, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 192.541, "pct_cuda_time": 0.2166865685129852, 
"trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.21, "cuda_time_us": 43.199, "pct_cuda_time": 0.048616362609482905, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.199, "pct_cuda_time": 0.048616362609482905, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 456.753, "cuda_time_us": 2037.4450000000002, "pct_cuda_time": 2.2929504135947107, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.886, "cuda_time_us": 1272.8790000000001, "pct_cuda_time": 1.432504155697956, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0008643108980319654, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1272.111, "pct_cuda_time": 1.431639844799924, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.672, "cuda_time_us": 176.03, "pct_cuda_time": 0.19810500961011313, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 176.03, "pct_cuda_time": 0.19810500961011313, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.334, "cuda_time_us": 588.536, "pct_cuda_time": 0.6623412482866415, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.001440518163386609, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 587.256, "pct_cuda_time": 0.660900730123255, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2465.179, "cuda_time_us": 2730.107, "pct_cuda_time": 3.0724755636632226, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.982, "cuda_time_us": 44.287, "pct_cuda_time": 
0.049840803048361526, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.287, "pct_cuda_time": 0.049840803048361526, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1790.74, "cuda_time_us": 602.52, "pct_cuda_time": 0.6780789092216404, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 158.795, "cuda_time_us": 274.557, "pct_cuda_time": 0.30898776983198223, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 273.821, "pct_cuda_time": 0.30815947188803494, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 539.44, "cuda_time_us": 55.007, "pct_cuda_time": 0.06190514266672437, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 55.007, "pct_cuda_time": 0.06190514266672437, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 746.114, "cuda_time_us": 80.63799999999999, "pct_cuda_time": 0.09075039348372607, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.623, "pct_cuda_time": 0.02546003313304317, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, 
cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.48, "pct_cuda_time": 0.06356286395943413, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.535, "pct_cuda_time": 0.0017274963912487851, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 193.997, "cuda_time_us": 192.31799999999998, "pct_cuda_time": 0.21643560323920769, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.000829423348762446, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 191.581, "pct_cuda_time": 0.21560617989044525, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.536, "cuda_time_us": 43.135, "pct_cuda_time": 0.04854433670131358, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.135, "pct_cuda_time": 0.04854433670131358, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 458.009, "cuda_time_us": 2040.165, "pct_cuda_time": 2.296011514691907, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 166.092, "cuda_time_us": 1275.855, "pct_cuda_time": 1.4358533604278296, "trace": "" }, 
"children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.824, "pct_cuda_time": 0.002052738382825918, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1274.031, "pct_cuda_time": 1.4338006220450037, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 92.841, "cuda_time_us": 176.03, "pct_cuda_time": 0.19810500961011313, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 176.03, "pct_cuda_time": 0.19810500961011313, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.559, "cuda_time_us": 588.28, "pct_cuda_time": 0.6620531446539643, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 587.544, "pct_cuda_time": 0.661224846710017, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2459.247, "cuda_time_us": 2735.8060000000005, "pct_cuda_time": 3.078889245704739, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.899, "cuda_time_us": 43.871, "pct_cuda_time": 0.04937263464526088, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.871, "pct_cuda_time": 0.04937263464526088, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1741.533, "cuda_time_us": 606.267, "pct_cuda_time": 0.6822958010639917, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 151.396, "cuda_time_us": 276.54, "pct_cuda_time": 0.3112194475804163, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.824, "pct_cuda_time": 0.002052738382825918, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { 
"name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 274.716, "pct_cuda_time": 0.30916670919759043, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 492.96, "cuda_time_us": 54.4, "pct_cuda_time": 0.061222021943930886, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.4, "pct_cuda_time": 0.061222021943930886, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 745.199, "cuda_time_us": 81.08800000000001, "pct_cuda_time": 0.09125682565054169, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.815, "pct_cuda_time": 0.025676110857551164, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.576, "pct_cuda_time": 0.06367090282168811, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.697, 
"pct_cuda_time": 0.0019098119713024028, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 209.036, "cuda_time_us": 194.239, "pct_cuda_time": 0.21859750588910276, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.000829423348762446, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 193.502, "pct_cuda_time": 0.21776808254034036, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.661, "cuda_time_us": 43.552, "pct_cuda_time": 0.04901363050922937, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.552, "pct_cuda_time": 0.04901363050922937, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 493.452, "cuda_time_us": 2042.1160000000002, "pct_cuda_time": 2.2982071794862566, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.275, "cuda_time_us": 1276.5590000000002, "pct_cuda_time": 1.4366456454176926, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1275.823, "pct_cuda_time": 1.435817347473745, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 114.936, "cuda_time_us": 176.094, "pct_cuda_time": 0.19817703551828245, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 176.094, "pct_cuda_time": 0.19817703551828245, "trace": 
"_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 152.588, "cuda_time_us": 589.463, "pct_cuda_time": 0.6633844985502818, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.343, "pct_cuda_time": 0.0015114186667407937, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 588.12, "pct_cuda_time": 0.6618730798835409, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2444.279, "cuda_time_us": 2735.3860000000004, "pct_cuda_time": 3.0784165756823776, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.534, "cuda_time_us": 44.863, "pct_cuda_time": 0.0504890362218855, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.863, "pct_cuda_time": 0.0504890362218855, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1770.241, "cuda_time_us": 606.5820000000001, "pct_cuda_time": 0.6826503035807627, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.767, "cuda_time_us": 275.964, "pct_cuda_time": 0.3105712144068923, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 275.228, "pct_cuda_time": 0.30974291646294505, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 550.439, "cuda_time_us": 55.583, "pct_cuda_time": 0.06255337584024835, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 55.583, "pct_cuda_time": 0.06255337584024835, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 728.451, "cuda_time_us": 80.79799999999999, "pct_cuda_time": 0.09093045825414939, "trace": "" }, 
"children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.719, "pct_cuda_time": 0.025568071995297162, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.607, "pct_cuda_time": 0.06370579037095764, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0016565958878946004, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 190.713, "cuda_time_us": 194.23700000000002, "pct_cuda_time": 0.2185952550794725, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.663, "pct_cuda_time": 0.0018715482075874457, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { 
"name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 192.574, "pct_cuda_time": 0.21672370687188502, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.964, "cuda_time_us": 43.295, "pct_cuda_time": 0.04872440147173691, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.295, "pct_cuda_time": 0.04872440147173691, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 447.452, "cuda_time_us": 2040.6460000000002, "pct_cuda_time": 2.2965528344079926, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.002, "cuda_time_us": 1274.7350000000001, "pct_cuda_time": 1.4345929070348666, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.0014045052093019437, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1273.487, "pct_cuda_time": 1.4331884018255647, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.508, "cuda_time_us": 176.862, "pct_cuda_time": 0.1990413464163144, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 176.862, "pct_cuda_time": 0.1990413464163144, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.274, "cuda_time_us": 589.049, "pct_cuda_time": 0.6629185809568114, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.6, "pct_cuda_time": 0.0018006477042332614, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 587.449, "pct_cuda_time": 0.6611179332525782, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2518.894, 
"cuda_time_us": 2734.522, "pct_cuda_time": 3.077444225922091, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.527, "cuda_time_us": 43.775, "pct_cuda_time": 0.04926459578300688, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.775, "pct_cuda_time": 0.04926459578300688, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1793.012, "cuda_time_us": 605.4639999999999, "pct_cuda_time": 0.6813921009974295, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.858, "cuda_time_us": 276.284, "pct_cuda_time": 0.31093134394773897, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.216, "pct_cuda_time": 0.0013684922552172785, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 275.068, "pct_cuda_time": 0.3095628516925217, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 511.168, "cuda_time_us": 54.88, "pct_cuda_time": 0.06176221625520086, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.88, "pct_cuda_time": 0.06176221625520086, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 755.422, "cuda_time_us": 79.711, "pct_cuda_time": 0.08970714322008594, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.559, "pct_cuda_time": 0.02538800722487384, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, 
false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.84, "pct_cuda_time": 0.06284260487774082, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0014765311174712744, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 238.093, "cuda_time_us": 194.589, "pct_cuda_time": 0.2189913975744038, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.001440518163386609, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 193.309, "pct_cuda_time": 0.21755087941101717, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 104.372, "cuda_time_us": 44.031, "pct_cuda_time": 0.04955269941568421, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.031, "pct_cuda_time": 0.04955269941568421, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 473.289, "cuda_time_us": 2041.252, "pct_cuda_time": 2.2972348297259706, "trace": "" }, "children": [ { "entry": { 
"name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.131, "cuda_time_us": 1273.935, "pct_cuda_time": 1.4336925831827498, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0008643108980319654, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1273.167, "pct_cuda_time": 1.4328282722847177, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.617, "cuda_time_us": 175.646, "pct_cuda_time": 0.1976728541610971, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 175.646, "pct_cuda_time": 0.1976728541610971, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 151.614, "cuda_time_us": 591.671, "pct_cuda_time": 0.6658693923821237, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.311, "pct_cuda_time": 0.0014754057126561283, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 590.36, "pct_cuda_time": 0.6643939866694676, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2502.823, "cuda_time_us": 2730.877, "pct_cuda_time": 3.073342125370885, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.683, "cuda_time_us": 44.704, "pct_cuda_time": 0.05031009685627732, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.704, "pct_cuda_time": 0.05031009685627732, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1768.807, "cuda_time_us": 603.545, "pct_cuda_time": 0.6792324491571647, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 151.716, "cuda_time_us": 274.364, "pct_cuda_time": 0.308770566702659, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0008643108980319654, "trace": "mm(bfloat16[4096, 4096], 
bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 273.596, "pct_cuda_time": 0.3079062558046271, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 516.911, "cuda_time_us": 54.527, "pct_cuda_time": 0.061364948355454405, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.527, "pct_cuda_time": 0.061364948355454405, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 756.44, "cuda_time_us": 80.704, "pct_cuda_time": 0.0908246702015257, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.4, "pct_cuda_time": 0.025209067859265657, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.448, "pct_cuda_time": 0.06352685100534945, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void 
at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.856, "pct_cuda_time": 0.0020887513369105834, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 193.962, "cuda_time_us": 193.95000000000002, "pct_cuda_time": 0.21827226389752566, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0014765311174712744, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 192.638, "pct_cuda_time": 0.21679573278005435, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.298, "cuda_time_us": 43.231, "pct_cuda_time": 0.04865237556356758, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.231, "pct_cuda_time": 0.04865237556356758, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 478.319, "cuda_time_us": 2039.397, "pct_cuda_time": 2.295147203793875, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.26, "cuda_time_us": 1275.9189999999999, "pct_cuda_time": 1.4359253863359989, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.792, "pct_cuda_time": 0.0020167254287412526, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1274.127, "pct_cuda_time": 1.4339086609072578, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 104.54, "cuda_time_us": 176.286, "pct_cuda_time": 0.19839311324279046, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 176.286, "pct_cuda_time": 0.19839311324279046, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 165.788, "cuda_time_us": 587.192, "pct_cuda_time": 0.6608287042150858, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 586.456, "pct_cuda_time": 0.6600004062711384, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2746.955, "cuda_time_us": 2730.523, "pct_cuda_time": 3.0729437320663235, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.225, "cuda_time_us": 43.487, "pct_cuda_time": 0.0489404791962449, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.487, "pct_cuda_time": 0.0489404791962449, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2048.062, "cuda_time_us": 604.9200000000001, "pct_cuda_time": 0.6807798807779903, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 162.834, "cuda_time_us": 276.252, "pct_cuda_time": 0.31089533099365435, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.824, "pct_cuda_time": 0.002052738382825918, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 274.428, "pct_cuda_time": 0.3088425926108284, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 544.192, "cuda_time_us": 54.848, "pct_cuda_time": 0.0617262033011162, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.848, "pct_cuda_time": 0.0617262033011162, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" 
}, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 969.65, "cuda_time_us": 80.319, "pct_cuda_time": 0.09039138934769457, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.528, "pct_cuda_time": 0.02535311967560432, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.319, "pct_cuda_time": 0.06338167378419565, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0016565958878946004, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 217.174, "cuda_time_us": 193.50099999999998, "pct_cuda_time": 0.21776695713552513, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 
4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 192.765, "pct_cuda_time": 0.21693865919157787, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.844, "cuda_time_us": 43.583, "pct_cuda_time": 0.04904851805849889, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.583, "pct_cuda_time": 0.04904851805849889, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 464.455, "cuda_time_us": 2038.533, "pct_cuda_time": 2.294174854033589, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 161.987, "cuda_time_us": 1273.935, "pct_cuda_time": 1.4336925831827498, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0014765311174712744, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1272.623, "pct_cuda_time": 1.4322160520652785, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.746, "cuda_time_us": 175.741, "pct_cuda_time": 0.19777976761853597, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 175.741, "pct_cuda_time": 0.19777976761853597, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.348, "cuda_time_us": 588.8570000000001, "pct_cuda_time": 0.6627025032323036, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.0014045052093019437, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 587.609, "pct_cuda_time": 0.6612979980230016, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- 
linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2484.436, "cuda_time_us": 2753.3070000000002, "pct_cuda_time": 3.098584955374605, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.184, "cuda_time_us": 44.512, "pct_cuda_time": 0.05009401913176933, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.512, "pct_cuda_time": 0.05009401913176933, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1794.415, "cuda_time_us": 602.679, "pct_cuda_time": 0.6782578485872486, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.596, "cuda_time_us": 274.58799999999997, "pct_cuda_time": 0.3090226573812517, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 273.852, "pct_cuda_time": 0.3081943594373044, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 542.306, "cuda_time_us": 54.623, "pct_cuda_time": 0.061472987217708386, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 54.623, "pct_cuda_time": 0.061472987217708386, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 775.572, "cuda_time_us": 79.64699999999999, "pct_cuda_time": 0.08963511731191659, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 22.304, "pct_cuda_time": 0.02510102899701166, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, 
true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.031, "pct_cuda_time": 0.06305755719743367, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0014765311174712744, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 193.582, "cuda_time_us": 193.821, "pct_cuda_time": 0.21812708667637185, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.759, "pct_cuda_time": 0.0019795870698414418, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 192.062, "pct_cuda_time": 0.2161474996065304, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.945, "cuda_time_us": 43.456, "pct_cuda_time": 0.048905591646975374, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 43.456, "pct_cuda_time": 0.048905591646975374, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 
1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 460.728, "cuda_time_us": 2062.6600000000003, "pct_cuda_time": 2.321327496008612, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.175, "cuda_time_us": 1276.9750000000001, "pct_cuda_time": 1.437113813820793, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.0014045052093019437, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1275.727, "pct_cuda_time": 1.4357093086114912, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.713, "cuda_time_us": 175.997, "pct_cuda_time": 0.1980678712512133, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 175.997, "pct_cuda_time": 0.1980678712512133, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.635, "cuda_time_us": 609.688, "pct_cuda_time": 0.6861458109366053, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.6, "pct_cuda_time": 0.0018006477042332614, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 608.088, "pct_cuda_time": 0.684345163232372, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2321.116, "cuda_time_us": 2975.7990000000004, "pct_cuda_time": 3.3489785235060223, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.078, "cuda_time_us": 45.472, "pct_cuda_time": 0.05117440775430929, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 45.472, "pct_cuda_time": 0.05117440775430929, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1664.773, "cuda_time_us": 651.093, "pct_cuda_time": 0.7327431973077168, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.113, "cuda_time_us": 297.851, 
"pct_cuda_time": 0.3352029495959882, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.247, "pct_cuda_time": 0.001403379804486798, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 296.604, "pct_cuda_time": 0.3337995697915014, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 475.727, "cuda_time_us": 57.664, "pct_cuda_time": 0.06489534326056674, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 57.664, "pct_cuda_time": 0.06489534326056674, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 709.16, "cuda_time_us": 85.757, "pct_cuda_time": 0.09651134073245737, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 23.519, "pct_cuda_time": 0.026468395847413794, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 60.703, "pct_cuda_time": 0.0683154484937948, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) 
<- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.535, "pct_cuda_time": 0.0017274963912487851, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 185.575, "cuda_time_us": 209.821, "pct_cuda_time": 0.23613356371870445, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0008643108980319654, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.053, "pct_cuda_time": 0.2352692528206725, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.31, "cuda_time_us": 44.415, "pct_cuda_time": 0.04998485486470019, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.415, "pct_cuda_time": 0.04998485486470019, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 435.655, "cuda_time_us": 2234.8190000000004, "pct_cuda_time": 2.5150760635792957, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 155.306, "cuda_time_us": 1419.6290000000001, "pct_cuda_time": 1.5976573123206006, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1418.893, "pct_cuda_time": 1.596829014376653, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { 
"entry": { "name": "SiluAndMul", "cpu_time_us": 89.483, "cuda_time_us": 181.982, "pct_cuda_time": 0.20480341906986085, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 181.982, "pct_cuda_time": 0.20480341906986085, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.933, "cuda_time_us": 633.2080000000001, "pct_cuda_time": 0.7126153321888343, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.0014045052093019437, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 631.96, "pct_cuda_time": 0.7112108269795324, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2613.565, "cuda_time_us": 2972.1859999999997, "pct_cuda_time": 3.3449124359089, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.987, "cuda_time_us": 45.535, "pct_cuda_time": 0.05124530825766346, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 45.535, "pct_cuda_time": 0.05124530825766346, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1916.005, "cuda_time_us": 650.329, "pct_cuda_time": 0.7318833880289453, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 171.021, "cuda_time_us": 295.997, "pct_cuda_time": 0.3331164490687079, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.000829423348762446, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 295.26, "pct_cuda_time": 0.33228702571994545, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 554.844, "cuda_time_us": 57.632, "pct_cuda_time": 0.06485933030648207, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, 
"cuda_time_us": 57.632, "pct_cuda_time": 0.06485933030648207, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 837.797, "cuda_time_us": 86.463, "pct_cuda_time": 0.09730587653195027, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 23.68, "pct_cuda_time": 0.026649586022652265, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 61.183, "pct_cuda_time": 0.06885564280506476, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.6, "pct_cuda_time": 0.0018006477042332614, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 202.78, "cuda_time_us": 210.237, "pct_cuda_time": 0.2366017321218051, "trace": "" }, "children": [ 
{ "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.216, "pct_cuda_time": 0.0013684922552172785, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.021, "pct_cuda_time": 0.2352332398665878, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.109, "cuda_time_us": 44.8, "pct_cuda_time": 0.050418135718531314, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.8, "pct_cuda_time": 0.050418135718531314, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 473.879, "cuda_time_us": 2231.522, "pct_cuda_time": 2.51136560390376, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 166.027, "cuda_time_us": 1414.381, "pct_cuda_time": 1.5917511878507156, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.76, "pct_cuda_time": 0.0019807124746565874, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1412.621, "pct_cuda_time": 1.5897704753760586, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.692, "cuda_time_us": 182.525, "pct_cuda_time": 0.205414513884485, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 182.525, "pct_cuda_time": 0.205414513884485, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 155.65, "cuda_time_us": 634.616, "pct_cuda_time": 0.7141999021685596, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 633.88, "pct_cuda_time": 
0.7133716042246122, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2399.108, "cuda_time_us": 2969.082, "pct_cuda_time": 3.3414191793626875, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.753, "cuda_time_us": 45.568, "pct_cuda_time": 0.05128244661656328, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 45.568, "pct_cuda_time": 0.05128244661656328, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1722.871, "cuda_time_us": 651.2549999999999, "pct_cuda_time": 0.7329255128877703, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 148.431, "cuda_time_us": 298.07599999999996, "pct_cuda_time": 0.33545616567939596, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.664, "pct_cuda_time": 0.0018726736124025916, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 296.412, "pct_cuda_time": 0.3335834920669934, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 494.75, "cuda_time_us": 57.471, "pct_cuda_time": 0.0646781401312436, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 57.471, "pct_cuda_time": 0.0646781401312436, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 748.232, "cuda_time_us": 86.23899999999999, "pct_cuda_time": 0.09705378585335764, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 23.296, "pct_cuda_time": 0.02621743057363628, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, 
cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 61.215, "pct_cuda_time": 0.06889165575914943, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.728, "pct_cuda_time": 0.0019446995205719222, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 191.088, "cuda_time_us": 209.469, "pct_cuda_time": 0.2357374212237731, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0008643108980319654, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 208.701, "pct_cuda_time": 0.23487311032574115, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.87, "cuda_time_us": 44.672, "pct_cuda_time": 0.05027408390219265, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.672, 
"pct_cuda_time": 0.05027408390219265, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 463.139, "cuda_time_us": 2227.587, "pct_cuda_time": 2.5069371359561607, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.839, "cuda_time_us": 1414.445, "pct_cuda_time": 1.5918232137588844, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.0014045052093019437, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1413.197, "pct_cuda_time": 1.5904187085495827, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.622, "cuda_time_us": 182.91, "pct_cuda_time": 0.20584779473831616, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 182.91, "pct_cuda_time": 0.20584779473831616, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.568, "cuda_time_us": 630.232, "pct_cuda_time": 0.7092661274589603, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 629.496, "pct_cuda_time": 0.7084378295150131, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2412.622, "cuda_time_us": 2944.058, "pct_cuda_time": 3.3132570492684787, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.806, "cuda_time_us": 44.672, "pct_cuda_time": 0.05027408390219265, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.672, "pct_cuda_time": 0.05027408390219265, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1706.449, "cuda_time_us": 645.143, "pct_cuda_time": 0.7260470386575993, "trace": "" }, "children": [ { "entry": { "name": 
"QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 153.072, "cuda_time_us": 294.012, "pct_cuda_time": 0.3308825205106435, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 293.276, "pct_cuda_time": 0.3300542225666962, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[4096, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 490.325, "cuda_time_us": 57.503, "pct_cuda_time": 0.06471415308532825, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 57.503, "pct_cuda_time": 0.06471415308532825, "trace": "_C::rotary_embedding(int64[4096], bfloat16[4096, 4096], bfloat16[4096, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 727.556, "cuda_time_us": 85.919, "pct_cuda_time": 0.09669365631251099, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 23.68, "pct_cuda_time": 0.026649586022652265, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[4096], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 60.575, "pct_cuda_time": 0.06817139667745613, "trace": "_vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, 
None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.664, "pct_cuda_time": 0.0018726736124025916, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], None, None, bfloat16[4096, 32, 128], int32[33], int32[33], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[4096, 32, 128], bfloat16[4096, 8, 128], bfloat16[4096, 8, 128], bfloat16[4096, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 204.854, "cuda_time_us": 207.709, "pct_cuda_time": 0.23375670874911653, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.696, "pct_cuda_time": 0.0019086865664872568, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 206.013, "pct_cuda_time": 0.2318480221826293, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[4096, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 102.782, "cuda_time_us": 44.576, "pct_cuda_time": 0.05016604503993866, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.576, "pct_cuda_time": 0.05016604503993866, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 469.831, "cuda_time_us": 2209.667, "pct_cuda_time": 2.4867698816687485, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.851, "cuda_time_us": 1400.269, "pct_cuda_time": 1.5758694750993778, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0008282979439473002, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1399.533, "pct_cuda_time": 1.5750411771554305, "trace": "mm(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[4096, 4096], bfloat16[4096, 28672]) <- 
linear(bfloat16[4096, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.986, "cuda_time_us": 181.598, "pct_cuda_time": 0.20437126362084487, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 181.598, "pct_cuda_time": 0.20437126362084487, "trace": "_C::silu_and_mul(bfloat16[4096, 14336], bfloat16[4096, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 157.492, "cuda_time_us": 627.8000000000001, "pct_cuda_time": 0.706529142948526, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0015125440715559396, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 626.456, "pct_cuda_time": 0.70501659887697, "trace": "mm(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[4096, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[4096, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.062, "cuda_time_us": 44.576, "pct_cuda_time": 0.05016604503993866, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 44.576, "pct_cuda_time": 0.05016604503993866, "trace": "_C::fused_add_rms_norm(bfloat16[4096, 4096], bfloat16[4096, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 551.601, "cuda_time_us": 408.219, "pct_cuda_time": 0.4594116282339985, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 4.576, "pct_cuda_time": 0.005149852434107127, "trace": "index_select(bfloat16[4096, 4096], 0, int64[32])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.408, "pct_cuda_time": 0.00158456997972527, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[32, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 402.235, "pct_cuda_time": 0.45267720582016613, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[32, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 10532.643, "cuda_time_us": 192.477, "pct_cuda_time": 0.21661454260481589, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, 
"cuda_time_us": 3.136, "pct_cuda_time": 0.0035292695002971924, "trace": "copy_(bfloat16[32], bfloat16[32], True) <- _to_copy(bfloat16[32], 15, 0, None, None, True, None) <- to(bfloat16[32], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.0028090104186038875, "trace": "copy_(bfloat16[32], bfloat16[32], True) <- _to_copy(bfloat16[32], 15, 0, None, None, True, None) <- to(bfloat16[32], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.002845023372688553, "trace": "copy_(int32[32], int32[32], True) <- _to_copy(int32[32], 3, 0, None, None, True, None) <- to(int32[32], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.432, "pct_cuda_time": 0.002736984510434557, "trace": "copy_(bfloat16[32], bfloat16[32], True) <- _to_copy(bfloat16[32], 15, 0, None, None, True, None) <- to(bfloat16[32], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.0026649586022652267, "trace": "copy_(bfloat16[32], bfloat16[32], True) <- _to_copy(bfloat16[32], 15, 0, None, None, True, None) <- to(bfloat16[32], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.0026649586022652267, "trace": "copy_(bfloat16[32], bfloat16[32], True) <- _to_copy(bfloat16[32], 15, 0, None, None, True, None) <- to(bfloat16[32], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.002881036326773218, "trace": "copy_(bfloat16[32], bfloat16[32], True) <- _to_copy(bfloat16[32], 15, 0, None, None, True, None) <- to(bfloat16[32], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 13.952, "pct_cuda_time": 0.01570164798091404, "trace": "copy_(float32[32, 128256], bfloat16[32, 128256], False) <- _to_copy(bfloat16[32, 128256], 6, None, None, None, False, None) <- to(bfloat16[32, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 23.231, "pct_cuda_time": 0.02614427926065181, "trace": "div_(float32[32, 128256], bfloat16[32, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, 
float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 40.48, "pct_cuda_time": 0.04555638691710151, "trace": "_softmax(float32[32, 128256], -1, False) <- softmax(float32[32, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 36.223, "pct_cuda_time": 0.04076553861902589, "trace": "_log_softmax(float32[32, 128256], -1, False) <- log_softmax(float32[32, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 2.176, "pct_cuda_time": 0.0024488808777572355, "trace": "copy_(int64[32], int32[32], False) <- _to_copy(int32[32], 4, None, None, None, False, None) <- to(int32[32], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 22.464, "pct_cuda_time": 0.025281093767434985, "trace": "index(float32[32, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 32.895, "pct_cuda_time": 0.037020191394220706, "trace": "argmax(float32[32, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0035652824543818576, "trace": "copy_(int64[32], int64[32], False) <- _to_copy(int64[32], 4, 0, None, None, False, None) <- to(int64[32], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] }, "decode_1": { "metadata": { "num_running_seqs": 32 }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", "cuda_time_us": 6775.041000000001, "pct_cuda_time": 92.43085259066764, "invocations": 1 }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 3.968, "pct_cuda_time": 0.05413481971249608, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, 
at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cuda_time_us": 3.968, "pct_cuda_time": 0.05413481971249608, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 6767.969, "pct_cuda_time": 92.33437037166391, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 198.5910000000001, "pct_cuda_time": 2.7093467695373774, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 4.512, "pct_cuda_time": 0.061556528866628595, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 194.07900000000012, "pct_cuda_time": 2.647790240670749, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 2100.993, "pct_cuda_time": 28.663527538360956, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 725.365, "pct_cuda_time": 9.896044229020847, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 725.365, "pct_cuda_time": 9.896044229020847, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 121.27799999999999, "pct_cuda_time": 1.65457728454942, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 121.27799999999999, "pct_cuda_time": 1.65457728454942, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 722.995, "pct_cuda_time": 9.863710679948614, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 81.14999999999999, "pct_cuda_time": 1.1071170916504678, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 599.953, "pct_cuda_time": 
8.185067412039103, "invocations": 32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 41.891999999999996, "pct_cuda_time": 0.5715261762590437, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 531.3550000000001, "pct_cuda_time": 7.249195344842077, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 531.3550000000001, "pct_cuda_time": 7.249195344842077, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 4468.385, "pct_cuda_time": 60.96149606376557, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 2667.2919999999995, "pct_cuda_time": 36.38945855357436, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 2667.2919999999995, "pct_cuda_time": 36.38945855357436, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 297.307, "pct_cuda_time": 4.056114124058234, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 297.307, "pct_cuda_time": 4.056114124058234, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 1503.7859999999998, "pct_cuda_time": 20.515923386132968, "invocations": 32 }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cuda_time_us": 1421.7079999999999, "pct_cuda_time": 19.396145731807803, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cuda_time_us": 82.07800000000002, "pct_cuda_time": 1.1197776543251647, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 393.467, "pct_cuda_time": 5.3680103598328355, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cuda_time_us": 3.584, "pct_cuda_time": 0.04889596619193194, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 0.736, "pct_cuda_time": 
0.010041135914414596, "invocations": 1 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 389.147, "pct_cuda_time": 5.30907325772649, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 161.33999999999997, "pct_cuda_time": 2.201137049499525, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 5.536, "pct_cuda_time": 0.0755268049214663, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 12.672, "pct_cuda_time": 0.17288216617861651, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 20.0, "pct_cuda_time": 0.2728569541960488, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 36.159, "pct_cuda_time": 0.49331173033874637, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 32.927, "pct_cuda_time": 0.4492180465406649, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 1.952, "pct_cuda_time": 0.02663083872953436, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, 
c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cuda_time_us": 20.031, "pct_cuda_time": 0.27327988247505264, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 29.599, "pct_cuda_time": 0.4038146493624424, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 2.464, "pct_cuda_time": 0.03361597675695321, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 86816.82, "cuda_time_us": 6775.041000000001, "pct_cuda_time": 92.43085259066764, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 439.795, "cuda_time_us": 3.968, "pct_cuda_time": 0.05413481971249608, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 3.968, "pct_cuda_time": 0.05413481971249608, "trace": "index_select(bfloat16[128256, 4096], 0, int64[32]) <- embedding(bfloat16[128256, 4096], int64[32], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 5657.47, "cuda_time_us": 218.23499999999999, "pct_cuda_time": 2.977346869948735, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 402.14, "cuda_time_us": 4.512, "pct_cuda_time": 0.061556528866628595, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.512, "pct_cuda_time": 0.061556528866628595, "trace": "_C::rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 4098.978, "cuda_time_us": 71.518, "pct_cuda_time": 0.9757091825096509, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 830.791, "cuda_time_us": 27.168, "pct_cuda_time": 0.37064888657991263, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.168, "pct_cuda_time": 0.37064888657991263, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 1170.728, "cuda_time_us": 3.583, "pct_cuda_time": 0.048882323344222135, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.583, "pct_cuda_time": 0.048882323344222135, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1382.633, "cuda_time_us": 23.711, "pct_cuda_time": 0.3234855620471256, "trace": "" }, "children": [ { "entry": { 
"name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.032306263376812173, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 20.063, "pct_cuda_time": 0.27371645360176633, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.017462845068547124, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 301.747, "cuda_time_us": 17.056, "pct_cuda_time": 0.2326924105383904, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 17.056, "pct_cuda_time": 0.2326924105383904, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], 
bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 172.599, "cuda_time_us": 2.976, "pct_cuda_time": 0.040601114784372054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.976, "pct_cuda_time": 0.040601114784372054, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 830.113, "cuda_time_us": 139.22899999999998, "pct_cuda_time": 1.8994800437880834, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 253.938, "cuda_time_us": 82.495, "pct_cuda_time": 1.1254667218201522, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.495, "pct_cuda_time": 1.1254667218201522, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 215.382, "cuda_time_us": 9.183, "pct_cuda_time": 0.12528227051911578, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.183, "pct_cuda_time": 0.12528227051911578, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 259.353, "cuda_time_us": 47.551, "pct_cuda_time": 0.6487310514488158, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 45.215, "pct_cuda_time": 0.6168613591987173, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.336, "pct_cuda_time": 0.031869692250098496, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 4600.423, "cuda_time_us": 211.07, "pct_cuda_time": 2.8795958661080006, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.42, "cuda_time_us": 3.168, "pct_cuda_time": 0.04322054154465413, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", 
"cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04322054154465413, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 3570.193, "cuda_time_us": 65.18299999999999, "pct_cuda_time": 0.8892817422680523, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 160.18, "cuda_time_us": 22.559, "pct_cuda_time": 0.30776900148543324, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.559, "pct_cuda_time": 0.30776900148543324, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 606.106, "cuda_time_us": 3.68, "pct_cuda_time": 0.05020567957207298, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05020567957207298, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 2077.285, "cuda_time_us": 22.4, "pct_cuda_time": 0.30559978869957466, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.464, "pct_cuda_time": 0.25190154011379223, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, 
None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.408, "pct_cuda_time": 0.019209129575401832, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 467.937, "cuda_time_us": 16.544, "pct_cuda_time": 0.22570727251097156, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.544, "pct_cuda_time": 0.22570727251097156, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 227.767, "cuda_time_us": 3.04, "pct_cuda_time": 0.04147425703779942, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04147425703779942, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 599.571, "cuda_time_us": 139.679, "pct_cuda_time": 1.905619325257495, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 168.927, "cuda_time_us": 82.751, "pct_cuda_time": 1.1289592908338617, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.751, "pct_cuda_time": 1.1289592908338617, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 190.444, "cuda_time_us": 9.408, "pct_cuda_time": 0.12835191125382134, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.408, "pct_cuda_time": 0.12835191125382134, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", 
"cpu_time_us": 151.46, "cuda_time_us": 47.519999999999996, "pct_cuda_time": 0.6483081231698118, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.8, "pct_cuda_time": 0.6111995773991493, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.72, "pct_cuda_time": 0.03710854577066264, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 3124.128, "cuda_time_us": 211.42199999999997, "pct_cuda_time": 2.884398148501851, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.552, "cuda_time_us": 3.137, "pct_cuda_time": 0.04279761326565025, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.137, "pct_cuda_time": 0.04279761326565025, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2396.945, "cuda_time_us": 65.63, "pct_cuda_time": 0.8953800951943339, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.765, "cuda_time_us": 22.496, "pct_cuda_time": 0.3069095020797157, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.496, "pct_cuda_time": 0.3069095020797157, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 772.144, "cuda_time_us": 3.68, "pct_cuda_time": 0.05020567957207298, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05020567957207298, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1035.52, "cuda_time_us": 22.654000000000003, "pct_cuda_time": 0.3090650720178645, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, 
float const*)", "cpu_time_us": 0, "cuda_time_us": 2.495, "pct_cuda_time": 0.034038905035957086, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.847, "pct_cuda_time": 0.2571267507866466, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.017899416195260802, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 209.318, "cuda_time_us": 16.8, "pct_cuda_time": 0.229199841524681, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.8, "pct_cuda_time": 0.229199841524681, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.882, "cuda_time_us": 3.2, "pct_cuda_time": 0.043657112671367806, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.043657112671367806, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 467.617, "cuda_time_us": 139.45499999999998, "pct_cuda_time": 1.902563327370499, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.504, "cuda_time_us": 83.167, "pct_cuda_time": 1.1346347154811394, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.167, "pct_cuda_time": 1.1346347154811394, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.294, "cuda_time_us": 9.728, "pct_cuda_time": 0.13271762252095812, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.728, "pct_cuda_time": 0.13271762252095812, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 147.891, "cuda_time_us": 46.559999999999995, "pct_cuda_time": 0.6352109893684015, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.032, "pct_cuda_time": 0.6007218703580209, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2532.209, "cuda_time_us": 210.77800000000002, "pct_cuda_time": 2.875612154576739, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.238, "cuda_time_us": 3.168, "pct_cuda_time": 0.04322054154465413, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04322054154465413, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", 
"cpu_time_us": 1795.089, "cuda_time_us": 65.21300000000001, "pct_cuda_time": 0.8896910276993466, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.561, "cuda_time_us": 22.335, "pct_cuda_time": 0.3047130035984375, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.335, "pct_cuda_time": 0.3047130035984375, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 545.613, "cuda_time_us": 3.872, "pct_cuda_time": 0.052825106332355036, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.872, "pct_cuda_time": 0.052825106332355036, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 765.609, "cuda_time_us": 22.303, "pct_cuda_time": 0.3042764324717238, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.034052547883666885, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.527, "pct_cuda_time": 0.2527610395195098, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], 
bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.017462845068547124, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 177.227, "cuda_time_us": 16.703, "pct_cuda_time": 0.22787648529683013, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.703, "pct_cuda_time": 0.22787648529683013, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 120.486, "cuda_time_us": 3.04, "pct_cuda_time": 0.04147425703779942, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04147425703779942, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 465.323, "cuda_time_us": 139.357, "pct_cuda_time": 1.9012263282949384, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 145.182, "cuda_time_us": 83.646, "pct_cuda_time": 1.1411696395341349, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.646, "pct_cuda_time": 1.1411696395341349, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.365, "cuda_time_us": 9.312, "pct_cuda_time": 0.12704219787368032, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.312, "pct_cuda_time": 0.12704219787368032, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.995, "cuda_time_us": 46.399, "pct_cuda_time": 0.6330144908871234, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 
0, "cuda_time_us": 43.679, "pct_cuda_time": 0.5959059451164608, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.72, "pct_cuda_time": 0.03710854577066264, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2508.014, "cuda_time_us": 211.421, "pct_cuda_time": 2.8843845056541415, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.598, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1821.596, "cuda_time_us": 64.926, "pct_cuda_time": 0.8857755304066331, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.752, "cuda_time_us": 22.271, "pct_cuda_time": 0.3038398613450101, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.271, "pct_cuda_time": 0.3038398613450101, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 523.941, "cuda_time_us": 3.712, "pct_cuda_time": 0.05064225069878666, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05064225069878666, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 774.415, "cuda_time_us": 22.463, "pct_cuda_time": 0.3064592881052922, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], 
float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.623, "pct_cuda_time": 0.2540707528996508, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.017899416195260802, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 203.289, "cuda_time_us": 16.48, "pct_cuda_time": 0.22483413025754423, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.48, "pct_cuda_time": 0.22483413025754423, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.357, "cuda_time_us": 2.976, "pct_cuda_time": 0.040601114784372054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.976, "pct_cuda_time": 0.040601114784372054, "trace": 
"_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 447.907, "cuda_time_us": 140.415, "pct_cuda_time": 1.9156604611719092, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 148.74, "cuda_time_us": 83.871, "pct_cuda_time": 1.1442392802688404, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.871, "pct_cuda_time": 1.1442392802688404, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.037, "cuda_time_us": 9.12, "pct_cuda_time": 0.12442277111339824, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.12, "pct_cuda_time": 0.12442277111339824, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.564, "cuda_time_us": 47.424, "pct_cuda_time": 0.6469984097896708, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.896, "pct_cuda_time": 0.6125092907792903, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2597.283, "cuda_time_us": 210.844, "pct_cuda_time": 2.876512582525585, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.305, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1882.498, "cuda_time_us": 64.99000000000001, "pct_cuda_time": 0.8866486726600606, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 145.587, "cuda_time_us": 22.271, "pct_cuda_time": 0.3038398613450101, "trace": "" }, 
"children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.271, "pct_cuda_time": 0.3038398613450101, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 533.136, "cuda_time_us": 3.744, "pct_cuda_time": 0.05107882182550033, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.05107882182550033, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 844.52, "cuda_time_us": 22.399, "pct_cuda_time": 0.30558614585186483, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.559, "pct_cuda_time": 0.2531976106462235, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 
0.017462845068547124, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 194.935, "cuda_time_us": 16.576, "pct_cuda_time": 0.22614384363768525, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.576, "pct_cuda_time": 0.22614384363768525, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.292, "cuda_time_us": 2.976, "pct_cuda_time": 0.040601114784372054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.976, "pct_cuda_time": 0.040601114784372054, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 465.407, "cuda_time_us": 139.774, "pct_cuda_time": 1.906915395789926, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 147.15, "cuda_time_us": 83.679, "pct_cuda_time": 1.1416198535085582, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.679, "pct_cuda_time": 1.1416198535085582, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.499, "cuda_time_us": 9.184, "pct_cuda_time": 0.12529591336682558, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.184, "pct_cuda_time": 0.12529591336682558, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.076, "cuda_time_us": 46.911, "pct_cuda_time": 0.6399996289145423, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.351, "pct_cuda_time": 0.605073938777448, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void 
cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2548.565, "cuda_time_us": 212.894, "pct_cuda_time": 2.9044804203306804, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.403, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1720.674, "cuda_time_us": 66.113, "pct_cuda_time": 0.9019695906381686, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.32, "cuda_time_us": 23.04, "pct_cuda_time": 0.3143312112338482, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 23.04, "pct_cuda_time": 0.3143312112338482, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 503.471, "cuda_time_us": 3.84, "pct_cuda_time": 0.052388535205641365, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.052388535205641365, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 736.156, "cuda_time_us": 22.880999999999997, "pct_cuda_time": 0.31216199844798953, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, 
cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.04, "pct_cuda_time": 0.2597598203946384, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.313, "pct_cuda_time": 0.0179130590429706, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 175.159, "cuda_time_us": 16.352, "pct_cuda_time": 0.22308784575068946, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.352, "pct_cuda_time": 0.22308784575068946, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.869, "cuda_time_us": 2.848, "pct_cuda_time": 0.03885483027751734, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.848, "pct_cuda_time": 0.03885483027751734, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 580.176, "cuda_time_us": 140.797, "pct_cuda_time": 1.920872028997054, "trace": "" }, "children": [ { "entry": { "name": 
"MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 149.762, "cuda_time_us": 84.735, "pct_cuda_time": 1.1560267006901097, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.735, "pct_cuda_time": 1.1560267006901097, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 222.434, "cuda_time_us": 9.407, "pct_cuda_time": 0.12833826840611154, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.407, "pct_cuda_time": 0.12833826840611154, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.202, "cuda_time_us": 46.655, "pct_cuda_time": 0.6365070599008328, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.127, "pct_cuda_time": 0.6020179408904522, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2489.438, "cuda_time_us": 211.25799999999998, "pct_cuda_time": 2.8821607214774434, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.309, "cuda_time_us": 3.135, "pct_cuda_time": 0.042770327570230644, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.135, "pct_cuda_time": 0.042770327570230644, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1802.388, "cuda_time_us": 65.502, "pct_cuda_time": 0.8936338106874793, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.831, "cuda_time_us": 22.303, "pct_cuda_time": 0.3042764324717238, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.303, "pct_cuda_time": 0.3042764324717238, "trace": "mm(bfloat16[32, 4096], 
bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 530.984, "cuda_time_us": 4.0, "pct_cuda_time": 0.054571390839209755, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.0, "pct_cuda_time": 0.054571390839209755, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 782.54, "cuda_time_us": 22.399, "pct_cuda_time": 0.30558614585186483, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.624, "pct_cuda_time": 0.035798832390521604, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.495, "pct_cuda_time": 0.2523244683927961, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.017462845068547124, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, 
int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 181.651, "cuda_time_us": 16.8, "pct_cuda_time": 0.229199841524681, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.8, "pct_cuda_time": 0.229199841524681, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.162, "cuda_time_us": 2.976, "pct_cuda_time": 0.040601114784372054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.976, "pct_cuda_time": 0.040601114784372054, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 460.127, "cuda_time_us": 139.64499999999998, "pct_cuda_time": 1.9051554684353613, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 145.794, "cuda_time_us": 83.71, "pct_cuda_time": 1.142042781787562, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.71, "pct_cuda_time": 1.142042781787562, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.538, "cuda_time_us": 8.96, "pct_cuda_time": 0.12223991547982986, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.96, "pct_cuda_time": 0.12223991547982986, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 156.954, "cuda_time_us": 46.975, "pct_cuda_time": 0.6408727711679696, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.447, "pct_cuda_time": 0.606383652157589, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, 
float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2428.145, "cuda_time_us": 211.10000000000002, "pct_cuda_time": 2.880005151539295, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.07, "cuda_time_us": 3.105, "pct_cuda_time": 0.04236104213893657, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.105, "pct_cuda_time": 0.04236104213893657, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1764.886, "cuda_time_us": 66.01400000000001, "pct_cuda_time": 0.9006189487148983, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.644, "cuda_time_us": 23.328, "pct_cuda_time": 0.3182603513742713, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 23.328, "pct_cuda_time": 0.3182603513742713, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 506.212, "cuda_time_us": 3.775, "pct_cuda_time": 0.0515017501045042, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.775, "pct_cuda_time": 0.0515017501045042, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 791.028, "cuda_time_us": 22.591, "pct_cuda_time": 0.3082055726121469, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 
256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.751, "pct_cuda_time": 0.2558170374065056, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.017462845068547124, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 174.01, "cuda_time_us": 16.32, "pct_cuda_time": 0.22265127462397583, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.32, "pct_cuda_time": 0.22265127462397583, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.878, "cuda_time_us": 3.008, "pct_cuda_time": 0.04103768591108574, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04103768591108574, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 427.861, "cuda_time_us": 138.973, "pct_cuda_time": 1.8959874747743748, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 141.601, "cuda_time_us": 83.391, "pct_cuda_time": 1.1376907133681353, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.391, "pct_cuda_time": 1.1376907133681353, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.405, "cuda_time_us": 9.151, "pct_cuda_time": 0.12484569939240212, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.151, "pct_cuda_time": 0.12484569939240212, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 137.41, "cuda_time_us": 46.431, "pct_cuda_time": 0.633451062013837, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 43.903, "pct_cuda_time": 0.5989619430034564, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2464.703, "cuda_time_us": 211.038, "pct_cuda_time": 2.879159294981287, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.819, "cuda_time_us": 3.2, "pct_cuda_time": 0.043657112671367806, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.043657112671367806, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1785.486, "cuda_time_us": 65.343, "pct_cuda_time": 0.8914645979016209, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 239.146, "cuda_time_us": 22.336, "pct_cuda_time": 0.3047266464461473, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.336, "pct_cuda_time": 0.3047266464461473, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 
477.979, "cuda_time_us": 3.84, "pct_cuda_time": 0.052388535205641365, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.052388535205641365, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 722.35, "cuda_time_us": 22.623, "pct_cuda_time": 0.3086421437388606, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.034052547883666885, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.815, "pct_cuda_time": 0.2566901796599329, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.017899416195260802, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 
128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 185.259, "cuda_time_us": 16.544, "pct_cuda_time": 0.22570727251097156, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.544, "pct_cuda_time": 0.22570727251097156, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.442, "cuda_time_us": 3.009, "pct_cuda_time": 0.04105132875879554, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.009, "pct_cuda_time": 0.04105132875879554, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 440.304, "cuda_time_us": 139.486, "pct_cuda_time": 1.902986255649503, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 153.974, "cuda_time_us": 83.967, "pct_cuda_time": 1.1455489936489813, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.967, "pct_cuda_time": 1.1455489936489813, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.897, "cuda_time_us": 9.184, "pct_cuda_time": 0.12529591336682558, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.184, "pct_cuda_time": 0.12529591336682558, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.376, "cuda_time_us": 46.335, "pct_cuda_time": 0.632141348633696, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 43.775, "pct_cuda_time": 0.5972156584966017, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- 
linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2411.967, "cuda_time_us": 211.646, "pct_cuda_time": 2.887454146388847, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 93.818, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1735.923, "cuda_time_us": 66.01599999999999, "pct_cuda_time": 0.9006462344103177, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 156.041, "cuda_time_us": 23.296, "pct_cuda_time": 0.3178237802475576, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 23.296, "pct_cuda_time": 0.3178237802475576, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 528.456, "cuda_time_us": 3.68, "pct_cuda_time": 0.05020567957207298, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05020567957207298, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 727.143, "cuda_time_us": 22.496, "pct_cuda_time": 0.3069095020797157, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, 
cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.624, "pct_cuda_time": 0.25408439574736064, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.017899416195260802, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 171.684, "cuda_time_us": 16.544, "pct_cuda_time": 0.22570727251097156, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.544, "pct_cuda_time": 0.22570727251097156, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.424, "cuda_time_us": 2.976, "pct_cuda_time": 0.040601114784372054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.976, "pct_cuda_time": 0.040601114784372054, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 428.336, "cuda_time_us": 139.518, "pct_cuda_time": 1.9034228267762168, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 139.685, "cuda_time_us": 82.911, "pct_cuda_time": 1.13114214646743, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.911, "pct_cuda_time": 1.13114214646743, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 
28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.366, "cuda_time_us": 9.184, "pct_cuda_time": 0.12529591336682558, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.184, "pct_cuda_time": 0.12529591336682558, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.408, "cuda_time_us": 47.422999999999995, "pct_cuda_time": 0.6469847669419609, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.8, "pct_cuda_time": 0.6111995773991493, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.623, "pct_cuda_time": 0.0357851895428118, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2410.04, "cuda_time_us": 209.661, "pct_cuda_time": 2.8603730936848892, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.69, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1752.349, "cuda_time_us": 64.99, "pct_cuda_time": 0.8866486726600604, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.868, "cuda_time_us": 22.303, "pct_cuda_time": 0.3042764324717238, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.303, "pct_cuda_time": 0.3042764324717238, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 488.84, "cuda_time_us": 3.744, "pct_cuda_time": 0.05107882182550033, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 
3.744, "pct_cuda_time": 0.05107882182550033, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 713.794, "cuda_time_us": 22.304, "pct_cuda_time": 0.3042900753194336, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.034052547883666885, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.528, "pct_cuda_time": 0.25277468236721956, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.017462845068547124, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.416, "cuda_time_us": 16.639, "pct_cuda_time": 0.22700334304340278, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.639, "pct_cuda_time": 0.22700334304340278, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.903, "cuda_time_us": 3.04, "pct_cuda_time": 0.04147425703779942, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04147425703779942, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 421.57, "cuda_time_us": 138.495, "pct_cuda_time": 1.8894661935690888, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 146.236, "cuda_time_us": 82.751, "pct_cuda_time": 1.1289592908338617, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.751, "pct_cuda_time": 1.1289592908338617, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 92.654, "cuda_time_us": 9.152, "pct_cuda_time": 0.12485934224011191, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.152, "pct_cuda_time": 0.12485934224011191, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 133.449, "cuda_time_us": 46.592, "pct_cuda_time": 0.6356475604951152, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.064, "pct_cuda_time": 0.6011584414847347, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2595.964, "cuda_time_us": 211.90099999999998, "pct_cuda_time": 2.8909330725548466, "trace": "" }, "children": [ { "entry": { "name": 
"RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.603, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1896.427, "cuda_time_us": 65.247, "pct_cuda_time": 0.8901548845214798, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.054, "cuda_time_us": 22.271, "pct_cuda_time": 0.3038398613450101, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.271, "pct_cuda_time": 0.3038398613450101, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 506.101, "cuda_time_us": 3.744, "pct_cuda_time": 0.05107882182550033, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.05107882182550033, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 903.627, "cuda_time_us": 22.56, "pct_cuda_time": 0.307782644333143, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.688, "pct_cuda_time": 0.25495753800078796, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.017899416195260802, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.738, "cuda_time_us": 16.672, "pct_cuda_time": 0.22745355701782624, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.672, "pct_cuda_time": 0.22745355701782624, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.986, "cuda_time_us": 3.008, "pct_cuda_time": 0.04103768591108574, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04103768591108574, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 452.813, "cuda_time_us": 140.51, "pct_cuda_time": 1.9169565317043404, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 141.225, "cuda_time_us": 83.839, "pct_cuda_time": 1.1438027091421266, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.839, "pct_cuda_time": 1.1438027091421266, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.159, "cuda_time_us": 9.472, "pct_cuda_time": 0.1292250535072487, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.472, "pct_cuda_time": 0.1292250535072487, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.556, "cuda_time_us": 47.199000000000005, "pct_cuda_time": 0.6439287690549654, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.639, "pct_cuda_time": 0.6090030789178711, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2319.211, "cuda_time_us": 211.901, "pct_cuda_time": 2.890933072554847, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.908, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1668.687, "cuda_time_us": 65.791, "pct_cuda_time": 0.8975765936756123, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.712, "cuda_time_us": 23.231, "pct_cuda_time": 0.3169369951464205, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 23.231, "pct_cuda_time": 0.3169369951464205, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 500.296, "cuda_time_us": 3.872, "pct_cuda_time": 0.052825106332355036, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.872, "pct_cuda_time": 0.052825106332355036, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 706.201, "cuda_time_us": 
22.336, "pct_cuda_time": 0.3047266464461473, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.496, "pct_cuda_time": 0.2523381112405059, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.017462845068547124, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 170.391, "cuda_time_us": 16.352, "pct_cuda_time": 0.22308784575068946, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.352, "pct_cuda_time": 0.22308784575068946, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- 
matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.206, "cuda_time_us": 3.04, "pct_cuda_time": 0.04147425703779942, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04147425703779942, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 435.958, "cuda_time_us": 139.934, "pct_cuda_time": 1.9090982514234947, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 154.143, "cuda_time_us": 83.455, "pct_cuda_time": 1.1385638556215625, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.455, "pct_cuda_time": 1.1385638556215625, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.633, "cuda_time_us": 8.992, "pct_cuda_time": 0.12267648660654355, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.12267648660654355, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 133.543, "cuda_time_us": 47.487, "pct_cuda_time": 0.6478579091953884, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.959, "pct_cuda_time": 0.6133687901850079, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2472.758, "cuda_time_us": 210.171, "pct_cuda_time": 2.8673309460168883, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.643, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, 
c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1768.689, "cuda_time_us": 65.726, "pct_cuda_time": 0.8966898085744751, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 170.07, "cuda_time_us": 22.944, "pct_cuda_time": 0.31302149785370714, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.944, "pct_cuda_time": 0.31302149785370714, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 503.413, "cuda_time_us": 3.872, "pct_cuda_time": 0.052825106332355036, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.872, "pct_cuda_time": 0.052825106332355036, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 758.431, "cuda_time_us": 22.654000000000003, "pct_cuda_time": 0.3090650720178645, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.527, "pct_cuda_time": 0.03447547616267077, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.815, "pct_cuda_time": 0.2566901796599329, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 
16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.017899416195260802, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 181.989, "cuda_time_us": 16.256, "pct_cuda_time": 0.22177813237054844, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.256, "pct_cuda_time": 0.22177813237054844, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.133, "cuda_time_us": 2.911, "pct_cuda_time": 0.0397143296832349, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.911, "pct_cuda_time": 0.0397143296832349, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 461.949, "cuda_time_us": 138.43, "pct_cuda_time": 1.8885794084679517, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 143.865, "cuda_time_us": 83.103, "pct_cuda_time": 1.133761573227712, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.103, "pct_cuda_time": 1.133761573227712, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 125.509, "cuda_time_us": 9.056, "pct_cuda_time": 0.12354962885997088, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.056, "pct_cuda_time": 0.12354962885997088, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { 
"name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 135.405, "cuda_time_us": 46.271, "pct_cuda_time": 0.6312682063802687, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 43.583, "pct_cuda_time": 0.5945962317363196, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.03667197464394896, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2384.087, "cuda_time_us": 210.654, "pct_cuda_time": 2.8739204414607227, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.029, "cuda_time_us": 3.168, "pct_cuda_time": 0.04322054154465413, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04322054154465413, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1705.213, "cuda_time_us": 65.75999999999999, "pct_cuda_time": 0.8971536653966083, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.033, "cuda_time_us": 22.304, "pct_cuda_time": 0.3042900753194336, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.304, "pct_cuda_time": 0.3042900753194336, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 478.51, "cuda_time_us": 3.743, "pct_cuda_time": 0.051065178977790525, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.743, "pct_cuda_time": 0.051065178977790525, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 747.783, "cuda_time_us": 22.272999999999996, "pct_cuda_time": 0.30386714704042966, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long 
const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.034052547883666885, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.464, "pct_cuda_time": 0.25190154011379223, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.313, "pct_cuda_time": 0.0179130590429706, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.404, "cuda_time_us": 17.44, "pct_cuda_time": 0.23793126405895457, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 17.44, "pct_cuda_time": 0.23793126405895457, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.447, "cuda_time_us": 3.008, "pct_cuda_time": 0.04103768591108574, "trace": 
"" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04103768591108574, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 434.172, "cuda_time_us": 138.71800000000002, "pct_cuda_time": 1.892508548608375, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 142.448, "cuda_time_us": 82.783, "pct_cuda_time": 1.1293958619605753, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.783, "pct_cuda_time": 1.1293958619605753, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.961, "cuda_time_us": 9.632, "pct_cuda_time": 0.1314079091408171, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.632, "pct_cuda_time": 0.1314079091408171, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.849, "cuda_time_us": 46.303, "pct_cuda_time": 0.6317047775069823, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 43.775, "pct_cuda_time": 0.5972156584966017, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2472.947, "cuda_time_us": 211.58200000000002, "pct_cuda_time": 2.88658100413542, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.018, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { 
"name": "LlamaAttention", "cpu_time_us": 1810.916, "cuda_time_us": 65.82400000000001, "pct_cuda_time": 0.898026807650036, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.771, "cuda_time_us": 22.943, "pct_cuda_time": 0.31300785500599737, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.943, "pct_cuda_time": 0.31300785500599737, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 516.496, "cuda_time_us": 3.905, "pct_cuda_time": 0.05327532030677852, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.905, "pct_cuda_time": 0.05327532030677852, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 811.539, "cuda_time_us": 22.272, "pct_cuda_time": 0.3038535041927199, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.464, "pct_cuda_time": 0.25190154011379223, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 
128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.017462845068547124, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.581, "cuda_time_us": 16.704, "pct_cuda_time": 0.22789012814453993, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.704, "pct_cuda_time": 0.22789012814453993, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.47, "cuda_time_us": 3.008, "pct_cuda_time": 0.04103768591108574, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04103768591108574, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 431.937, "cuda_time_us": 139.64600000000002, "pct_cuda_time": 1.9051691112830715, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 146.332, "cuda_time_us": 83.487, "pct_cuda_time": 1.1390004267482763, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.487, "pct_cuda_time": 1.1390004267482763, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.07, "cuda_time_us": 9.408, "pct_cuda_time": 0.12835191125382134, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.408, "pct_cuda_time": 0.12835191125382134, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.131, "cuda_time_us": 46.751000000000005, "pct_cuda_time": 0.6378167732809739, "trace": "" }, "children": [ { "entry": { "name": "void 
cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.063, "pct_cuda_time": 0.6011447986370249, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.03667197464394896, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2388.101, "cuda_time_us": 212.893, "pct_cuda_time": 2.9044667774829707, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.929, "cuda_time_us": 3.135, "pct_cuda_time": 0.042770327570230644, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.135, "pct_cuda_time": 0.042770327570230644, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1726.791, "cuda_time_us": 66.01599999999999, "pct_cuda_time": 0.9006462344103177, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.169, "cuda_time_us": 22.976, "pct_cuda_time": 0.31345806898042083, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.976, "pct_cuda_time": 0.31345806898042083, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 546.357, "cuda_time_us": 3.712, "pct_cuda_time": 0.05064225069878666, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05064225069878666, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 725.227, "cuda_time_us": 22.591, "pct_cuda_time": 0.3082055726121469, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.034052547883666885, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 
8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.783, "pct_cuda_time": 0.25625360853321927, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.017899416195260802, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 174.545, "cuda_time_us": 16.737, "pct_cuda_time": 0.22834034211896342, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.737, "pct_cuda_time": 0.22834034211896342, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.616, "cuda_time_us": 3.2, "pct_cuda_time": 0.043657112671367806, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, 
int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.043657112671367806, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 433.388, "cuda_time_us": 140.542, "pct_cuda_time": 1.9173931028310542, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 147.184, "cuda_time_us": 83.006, "pct_cuda_time": 1.1324382169998612, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.006, "pct_cuda_time": 1.1324382169998612, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 92.321, "cuda_time_us": 9.568, "pct_cuda_time": 0.13053476688738974, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.568, "pct_cuda_time": 0.13053476688738974, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 137.983, "cuda_time_us": 47.968, "pct_cuda_time": 0.6544201189438035, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 45.408, "pct_cuda_time": 0.6194944288067092, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2431.504, "cuda_time_us": 209.726, "pct_cuda_time": 2.861259878786026, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.484, "cuda_time_us": 3.328, "pct_cuda_time": 0.04540339717822252, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.328, "pct_cuda_time": 0.04540339717822252, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1768.104, "cuda_time_us": 65.088, "pct_cuda_time": 0.8879856717356212, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", 
"cpu_time_us": 134.792, "cuda_time_us": 22.368, "pct_cuda_time": 0.30516321757286097, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.368, "pct_cuda_time": 0.30516321757286097, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 519.532, "cuda_time_us": 3.776, "pct_cuda_time": 0.05151539295221401, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.05151539295221401, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 766.112, "cuda_time_us": 22.432, "pct_cuda_time": 0.3060363598262883, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.624, "pct_cuda_time": 0.25408439574736064, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, 
at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.017462845068547124, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 179.375, "cuda_time_us": 16.512, "pct_cuda_time": 0.22527070138425787, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.512, "pct_cuda_time": 0.22527070138425787, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.909, "cuda_time_us": 3.04, "pct_cuda_time": 0.04147425703779942, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04147425703779942, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 432.951, "cuda_time_us": 138.27, "pct_cuda_time": 1.8863965528343833, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 143.737, "cuda_time_us": 82.495, "pct_cuda_time": 1.1254667218201522, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.495, "pct_cuda_time": 1.1254667218201522, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.066, "cuda_time_us": 9.215, "pct_cuda_time": 0.12571884164582947, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.215, "pct_cuda_time": 0.12571884164582947, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.343, "cuda_time_us": 46.56, "pct_cuda_time": 0.6352109893684016, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.0, "pct_cuda_time": 0.6002852992313074, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 
14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2673.759, "cuda_time_us": 210.55700000000002, "pct_cuda_time": 2.8725970852328726, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.78, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1812.065, "cuda_time_us": 65.15100000000001, "pct_cuda_time": 0.8888451711413389, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 134.684, "cuda_time_us": 21.984, "pct_cuda_time": 0.29992436405229683, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.984, "pct_cuda_time": 0.29992436405229683, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 527.396, "cuda_time_us": 3.808, "pct_cuda_time": 0.05195196407892769, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05195196407892769, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 811.103, "cuda_time_us": 22.655, "pct_cuda_time": 0.3090787148655743, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { 
"entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.815, "pct_cuda_time": 0.2566901796599329, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.017462845068547124, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 194.134, "cuda_time_us": 16.704, "pct_cuda_time": 0.22789012814453993, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.704, "pct_cuda_time": 0.22789012814453993, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.925, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 645.8, "cuda_time_us": 
139.166, "pct_cuda_time": 1.898620544382366, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 141.616, "cuda_time_us": 83.166, "pct_cuda_time": 1.1346210726334296, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.166, "pct_cuda_time": 1.1346210726334296, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.713, "cuda_time_us": 9.28, "pct_cuda_time": 0.12660562674696663, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.28, "pct_cuda_time": 0.12660562674696663, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 358.184, "cuda_time_us": 46.72, "pct_cuda_time": 0.6373938450019699, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.16, "pct_cuda_time": 0.6024681548648756, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2360.803, "cuda_time_us": 209.183, "pct_cuda_time": 2.8538518124796033, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.752, "cuda_time_us": 3.168, "pct_cuda_time": 0.04322054154465413, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04322054154465413, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1688.187, "cuda_time_us": 65.152, "pct_cuda_time": 0.8888588139890485, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.085, "cuda_time_us": 22.272, "pct_cuda_time": 0.3038535041927199, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.272, 
"pct_cuda_time": 0.3038535041927199, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 481.734, "cuda_time_us": 3.776, "pct_cuda_time": 0.05151539295221401, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.05151539295221401, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 738.16, "cuda_time_us": 22.688, "pct_cuda_time": 0.3095289288399977, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.72, "pct_cuda_time": 0.25539410912750166, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.019645700702115514, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 
128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.047, "cuda_time_us": 16.416, "pct_cuda_time": 0.22396098800411685, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.416, "pct_cuda_time": 0.22396098800411685, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.107, "cuda_time_us": 2.88, "pct_cuda_time": 0.03929140140423103, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.88, "pct_cuda_time": 0.03929140140423103, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 441.144, "cuda_time_us": 137.983, "pct_cuda_time": 1.88248105554167, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 144.296, "cuda_time_us": 82.079, "pct_cuda_time": 1.1197912971728743, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.079, "pct_cuda_time": 1.1197912971728743, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.605, "cuda_time_us": 9.152, "pct_cuda_time": 0.12485934224011191, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.152, "pct_cuda_time": 0.12485934224011191, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.705, "cuda_time_us": 46.752, "pct_cuda_time": 0.6378304161286836, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.192, "pct_cuda_time": 0.6029047259915894, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, 
__nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2402.587, "cuda_time_us": 211.486, "pct_cuda_time": 2.8852712907552784, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.096, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1752.564, "cuda_time_us": 65.215, "pct_cuda_time": 0.8897183133947661, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.954, "cuda_time_us": 22.144, "pct_cuda_time": 0.3021072196858652, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.144, "pct_cuda_time": 0.3021072196858652, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 556.337, "cuda_time_us": 3.872, "pct_cuda_time": 0.052825106332355036, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.872, "pct_cuda_time": 0.052825106332355036, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 729.228, "cuda_time_us": 22.527, "pct_cuda_time": 0.30733243035871954, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, 
cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.687, "pct_cuda_time": 0.2549438951530782, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.017899416195260802, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 177.689, "cuda_time_us": 16.672, "pct_cuda_time": 0.22745355701782624, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.672, "pct_cuda_time": 0.22745355701782624, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.836, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 419.219, "cuda_time_us": 140.031, "pct_cuda_time": 1.9104216076513456, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 139.712, "cuda_time_us": 83.039, "pct_cuda_time": 1.1328884309742846, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.039, "pct_cuda_time": 1.1328884309742846, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.945, "cuda_time_us": 9.088, "pct_cuda_time": 0.12398619998668456, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.088, "pct_cuda_time": 0.12398619998668456, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 133.037, "cuda_time_us": 47.904, "pct_cuda_time": 0.6535469766903761, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 45.344, "pct_cuda_time": 0.6186212865532819, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2350.01, "cuda_time_us": 213.149, "pct_cuda_time": 2.90795934649668, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.951, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1691.76, "cuda_time_us": 65.24799999999999, "pct_cuda_time": 0.8901685273691893, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.97, "cuda_time_us": 22.208, "pct_cuda_time": 0.30298036193929256, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.208, "pct_cuda_time": 0.30298036193929256, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", 
"cpu_time_us": 485.742, "cuda_time_us": 3.904, "pct_cuda_time": 0.05326167745906872, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.904, "pct_cuda_time": 0.05326167745906872, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 729.171, "cuda_time_us": 22.368999999999996, "pct_cuda_time": 0.3051768604205707, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.496, "pct_cuda_time": 0.2523381112405059, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.313, "pct_cuda_time": 0.0179130590429706, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 
32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.755, "cuda_time_us": 16.767, "pct_cuda_time": 0.22874962755025746, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.767, "pct_cuda_time": 0.22874962755025746, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.127, "cuda_time_us": 2.976, "pct_cuda_time": 0.040601114784372054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.976, "pct_cuda_time": 0.040601114784372054, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 442.696, "cuda_time_us": 141.821, "pct_cuda_time": 1.9348423050518915, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 139.131, "cuda_time_us": 83.327, "pct_cuda_time": 1.1368175711147077, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.327, "pct_cuda_time": 1.1368175711147077, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.064, "cuda_time_us": 9.215, "pct_cuda_time": 0.12571884164582947, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.215, "pct_cuda_time": 0.12571884164582947, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.839, "cuda_time_us": 49.278999999999996, "pct_cuda_time": 0.6723058922913544, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 46.751, "pct_cuda_time": 0.6378167732809737, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 
14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2493.999, "cuda_time_us": 211.55, "pct_cuda_time": 2.886144433008706, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.402, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1794.134, "cuda_time_us": 65.66499999999999, "pct_cuda_time": 0.895857594864177, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.415, "cuda_time_us": 22.432, "pct_cuda_time": 0.3060363598262883, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.432, "pct_cuda_time": 0.3060363598262883, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 518.592, "cuda_time_us": 4.16, "pct_cuda_time": 0.05675424647277815, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.16, "pct_cuda_time": 0.05675424647277815, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 766.712, "cuda_time_us": 22.368999999999996, "pct_cuda_time": 0.3051768604205707, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, 
cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.496, "pct_cuda_time": 0.2523381112405059, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.313, "pct_cuda_time": 0.0179130590429706, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 200.605, "cuda_time_us": 16.704, "pct_cuda_time": 0.22789012814453993, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.704, "pct_cuda_time": 0.22789012814453993, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.256, "cuda_time_us": 3.071, "pct_cuda_time": 0.041897185316803295, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.071, "pct_cuda_time": 0.041897185316803295, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 463.038, "cuda_time_us": 139.71, "pct_cuda_time": 1.906042253536499, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 147.208, "cuda_time_us": 83.807, "pct_cuda_time": 1.143366138015413, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.807, "pct_cuda_time": 1.143366138015413, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- 
matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 107.805, "cuda_time_us": 9.184, "pct_cuda_time": 0.12529591336682558, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.184, "pct_cuda_time": 0.12529591336682558, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.472, "cuda_time_us": 46.719, "pct_cuda_time": 0.6373802021542602, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.127, "pct_cuda_time": 0.6020179408904522, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.592, "pct_cuda_time": 0.03536226126380792, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2338.471, "cuda_time_us": 211.57999999999998, "pct_cuda_time": 2.88655371844, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.609, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1681.405, "cuda_time_us": 65.086, "pct_cuda_time": 0.8879583860402015, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 146.112, "cuda_time_us": 22.303, "pct_cuda_time": 0.3042764324717238, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.303, "pct_cuda_time": 0.3042764324717238, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 469.669, "cuda_time_us": 3.776, "pct_cuda_time": 0.05151539295221401, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, 
int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.05151539295221401, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 732.897, "cuda_time_us": 22.464, "pct_cuda_time": 0.306472930953002, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.034052547883666885, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.592, "pct_cuda_time": 0.25364782462064694, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.376, "pct_cuda_time": 0.018772558448688154, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 182.585, "cuda_time_us": 16.543, "pct_cuda_time": 0.22569362966326176, "trace": 
"" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.543, "pct_cuda_time": 0.22569362966326176, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.208, "cuda_time_us": 2.848, "pct_cuda_time": 0.03885483027751734, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.848, "pct_cuda_time": 0.03885483027751734, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 428.15, "cuda_time_us": 140.51, "pct_cuda_time": 1.9169565317043404, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 140.649, "cuda_time_us": 83.679, "pct_cuda_time": 1.1416198535085582, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.679, "pct_cuda_time": 1.1416198535085582, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.745, "cuda_time_us": 9.216, "pct_cuda_time": 0.12573248449353927, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.216, "pct_cuda_time": 0.12573248449353927, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 140.629, "cuda_time_us": 47.615, "pct_cuda_time": 0.6496041937022432, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 45.087, "pct_cuda_time": 0.6151150746918627, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2265.101, "cuda_time_us": 210.621, "pct_cuda_time": 2.8734702274862998, "trace": "" }, "children": [ { "entry": 
{ "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.687, "cuda_time_us": 3.103, "pct_cuda_time": 0.042333756443516966, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.103, "pct_cuda_time": 0.042333756443516966, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1618.725, "cuda_time_us": 64.735, "pct_cuda_time": 0.8831697464940608, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.46, "cuda_time_us": 22.112, "pct_cuda_time": 0.3016706485591515, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.112, "pct_cuda_time": 0.3016706485591515, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 455.238, "cuda_time_us": 3.744, "pct_cuda_time": 0.05107882182550033, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.05107882182550033, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 709.299, "cuda_time_us": 22.366999999999997, "pct_cuda_time": 0.3051495747251511, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.034052547883666885, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 
256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.592, "pct_cuda_time": 0.25364782462064694, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.279, "pct_cuda_time": 0.017449202220837318, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 165.88, "cuda_time_us": 16.512, "pct_cuda_time": 0.22527070138425787, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.512, "pct_cuda_time": 0.22527070138425787, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.046, "cuda_time_us": 3.137, "pct_cuda_time": 0.04279761326565025, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.137, "pct_cuda_time": 0.04279761326565025, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 430.267, "cuda_time_us": 139.64600000000002, "pct_cuda_time": 1.9051691112830715, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 138.532, "cuda_time_us": 83.423, "pct_cuda_time": 1.138127284494849, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.423, "pct_cuda_time": 1.138127284494849, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.749, "cuda_time_us": 9.152, "pct_cuda_time": 0.12485934224011191, "trace": "" }, "children": [ { "entry": 
{ "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.152, "pct_cuda_time": 0.12485934224011191, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.672, "cuda_time_us": 47.071000000000005, "pct_cuda_time": 0.6421824845481107, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.511, "pct_cuda_time": 0.6072567944110164, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2393.233, "cuda_time_us": 211.453, "pct_cuda_time": 2.884821076780855, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.712, "cuda_time_us": 3.36, "pct_cuda_time": 0.045839968304936196, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.36, "pct_cuda_time": 0.045839968304936196, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1731.536, "cuda_time_us": 65.439, "pct_cuda_time": 0.8927743112817617, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.205, "cuda_time_us": 22.464, "pct_cuda_time": 0.306472930953002, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.464, "pct_cuda_time": 0.306472930953002, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 525.211, "cuda_time_us": 3.744, "pct_cuda_time": 0.05107882182550033, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.05107882182550033, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 733.466, 
"cuda_time_us": 22.784, "pct_cuda_time": 0.31083864222013874, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.72, "pct_cuda_time": 0.03710854577066264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.752, "pct_cuda_time": 0.2558306802542153, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.017899416195260802, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 175.355, "cuda_time_us": 16.447, "pct_cuda_time": 0.22438391628312068, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.447, "pct_cuda_time": 0.22438391628312068, "trace": "mm(bfloat16[32, 4096], 
bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.462, "cuda_time_us": 3.008, "pct_cuda_time": 0.04103768591108574, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04103768591108574, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 431.309, "cuda_time_us": 139.64600000000002, "pct_cuda_time": 1.9051691112830715, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 144.176, "cuda_time_us": 83.903, "pct_cuda_time": 1.1446758513955542, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.903, "pct_cuda_time": 1.1446758513955542, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.204, "cuda_time_us": 9.248, "pct_cuda_time": 0.12616905562025296, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.248, "pct_cuda_time": 0.12616905562025296, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 133.507, "cuda_time_us": 46.495000000000005, "pct_cuda_time": 0.6343242042672644, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 43.935, "pct_cuda_time": 0.5993985141301702, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2575.456, "cuda_time_us": 211.22899999999998, "pct_cuda_time": 2.881765078893859, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.353, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, 
void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1891.632, "cuda_time_us": 65.69500000000001, "pct_cuda_time": 0.8962668802954713, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 145.994, "cuda_time_us": 22.368, "pct_cuda_time": 0.30516321757286097, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.368, "pct_cuda_time": 0.30516321757286097, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 469.177, "cuda_time_us": 3.776, "pct_cuda_time": 0.05151539295221401, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.05151539295221401, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 948.561, "cuda_time_us": 23.071, "pct_cuda_time": 0.3147541395128521, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.624, "pct_cuda_time": 0.035798832390521604, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.167, "pct_cuda_time": 0.26149246205378335, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 
1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.017462845068547124, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 179.145, "cuda_time_us": 16.48, "pct_cuda_time": 0.22483413025754423, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.48, "pct_cuda_time": 0.22483413025754423, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.977, "cuda_time_us": 3.008, "pct_cuda_time": 0.04103768591108574, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04103768591108574, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 427.916, "cuda_time_us": 139.39, "pct_cuda_time": 1.9016765422693618, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 142.256, "cuda_time_us": 83.071, "pct_cuda_time": 1.1333250021009984, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.071, "pct_cuda_time": 1.1333250021009984, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.438, "cuda_time_us": 9.568, "pct_cuda_time": 0.13053476688738974, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.568, "pct_cuda_time": 0.13053476688738974, "trace": "_C::silu_and_mul(bfloat16[32, 14336], 
bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 135.676, "cuda_time_us": 46.751000000000005, "pct_cuda_time": 0.6378167732809739, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.191, "pct_cuda_time": 0.6028910831438796, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03492569013709425, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2352.31, "cuda_time_us": 211.38899999999995, "pct_cuda_time": 2.883947934527427, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.62, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1706.628, "cuda_time_us": 65.727, "pct_cuda_time": 0.896703451422185, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.528, "cuda_time_us": 22.368, "pct_cuda_time": 0.30516321757286097, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.368, "pct_cuda_time": 0.30516321757286097, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 487.991, "cuda_time_us": 3.712, "pct_cuda_time": 0.05064225069878666, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05064225069878666, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 720.565, "cuda_time_us": 22.943, "pct_cuda_time": 0.31300785500599737, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, 
__nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.624, "pct_cuda_time": 0.035798832390521604, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.039, "pct_cuda_time": 0.25974617754692864, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.017462845068547124, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 202.441, "cuda_time_us": 16.704, "pct_cuda_time": 0.22789012814453993, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.704, "pct_cuda_time": 0.22789012814453993, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.15, 
"cuda_time_us": 2.975, "pct_cuda_time": 0.040587471936662255, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.975, "pct_cuda_time": 0.040587471936662255, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 422.688, "cuda_time_us": 139.58299999999997, "pct_cuda_time": 1.9043096118773535, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 143.221, "cuda_time_us": 83.231, "pct_cuda_time": 1.1355078577345668, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.231, "pct_cuda_time": 1.1355078577345668, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.13, "cuda_time_us": 9.6, "pct_cuda_time": 0.1309713380141034, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.6, "pct_cuda_time": 0.1309713380141034, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 134.785, "cuda_time_us": 46.751999999999995, "pct_cuda_time": 0.6378304161286836, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.16, "pct_cuda_time": 0.6024681548648756, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.592, "pct_cuda_time": 0.03536226126380792, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2437.637, "cuda_time_us": 211.902, "pct_cuda_time": 2.890946715402556, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.34, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04278397041794045, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 
4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1796.51, "cuda_time_us": 65.407, "pct_cuda_time": 0.892337740155048, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 133.081, "cuda_time_us": 22.399, "pct_cuda_time": 0.30558614585186483, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.399, "pct_cuda_time": 0.30558614585186483, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 509.69, "cuda_time_us": 3.808, "pct_cuda_time": 0.05195196407892769, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05195196407892769, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 767.983, "cuda_time_us": 22.816, "pct_cuda_time": 0.31127521334685243, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.034052547883666885, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.008, "pct_cuda_time": 0.25932324926792477, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) 
<- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.017899416195260802, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 235.861, "cuda_time_us": 16.384, "pct_cuda_time": 0.22352441687740315, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.384, "pct_cuda_time": 0.22352441687740315, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.558, "cuda_time_us": 3.073, "pct_cuda_time": 0.041924471012222894, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.073, "pct_cuda_time": 0.041924471012222894, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 421.615, "cuda_time_us": 140.286, "pct_cuda_time": 1.913900533817345, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 144.991, "cuda_time_us": 83.743, "pct_cuda_time": 1.1424929957619856, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.743, "pct_cuda_time": 1.1424929957619856, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.881, "cuda_time_us": 9.216, "pct_cuda_time": 0.12573248449353927, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.216, "pct_cuda_time": 0.12573248449353927, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 133.196, "cuda_time_us": 47.327, "pct_cuda_time": 0.64567505356182, "trace": "" }, "children": [ { "entry": { "name": "void 
cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.735, "pct_cuda_time": 0.610312792298012, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.592, "pct_cuda_time": 0.03536226126380792, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2407.823, "cuda_time_us": 211.293, "pct_cuda_time": 2.8826382211472867, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.322, "cuda_time_us": 3.168, "pct_cuda_time": 0.04322054154465413, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04322054154465413, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1752.503, "cuda_time_us": 65.56700000000001, "pct_cuda_time": 0.8945205957886165, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.946, "cuda_time_us": 22.368, "pct_cuda_time": 0.30516321757286097, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.368, "pct_cuda_time": 0.30516321757286097, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 480.426, "cuda_time_us": 3.712, "pct_cuda_time": 0.05064225069878666, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05064225069878666, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 793.505, "cuda_time_us": 23.2, "pct_cuda_time": 0.31651406686741657, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 
128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 19.327, "pct_cuda_time": 0.26367531768735175, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.345, "pct_cuda_time": 0.01834963016968428, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 177.389, "cuda_time_us": 16.287, "pct_cuda_time": 0.2222010606495523, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.287, "pct_cuda_time": 0.2222010606495523, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.73, "cuda_time_us": 2.912, "pct_cuda_time": 0.0397279725309447, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, 
int)", "cpu_time_us": 0, "cuda_time_us": 2.912, "pct_cuda_time": 0.0397279725309447, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 424.463, "cuda_time_us": 139.646, "pct_cuda_time": 1.9051691112830713, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 143.366, "cuda_time_us": 83.359, "pct_cuda_time": 1.1372541422414215, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.359, "pct_cuda_time": 1.1372541422414215, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.286, "cuda_time_us": 9.408, "pct_cuda_time": 0.12835191125382134, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.408, "pct_cuda_time": 0.12835191125382134, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 130.389, "cuda_time_us": 46.879, "pct_cuda_time": 0.6395630577878285, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 44.351, "pct_cuda_time": 0.605073938777448, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.03448911901038056, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2468.478, "cuda_time_us": 212.382, "pct_cuda_time": 2.897495282303262, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.753, "cuda_time_us": 3.328, "pct_cuda_time": 0.04540339717822252, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.328, "pct_cuda_time": 0.04540339717822252, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1772.098, "cuda_time_us": 66.01599999999999, "pct_cuda_time": 0.9006462344103177, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", 
"cpu_time_us": 134.555, "cuda_time_us": 23.2, "pct_cuda_time": 0.31651406686741657, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 23.2, "pct_cuda_time": 0.31651406686741657, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[32, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 505.546, "cuda_time_us": 3.712, "pct_cuda_time": 0.05064225069878666, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05064225069878666, "trace": "_C::rotary_embedding(int64[32], bfloat16[32, 4096], bfloat16[32, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 762.365, "cuda_time_us": 22.4, "pct_cuda_time": 0.30559978869957466, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.034052547883666885, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[32], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 18.592, "pct_cuda_time": 0.25364782462064694, "trace": "_vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, 
at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.017899416195260802, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[32, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[32, 1, 32, 128], None, None, None, None, int32[32], None, None, int32[32, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[32, 32, 128], bfloat16[32, 8, 128], bfloat16[32, 8, 128], bfloat16[32, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 204.759, "cuda_time_us": 16.704, "pct_cuda_time": 0.22789012814453993, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.704, "pct_cuda_time": 0.22789012814453993, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[32, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.087, "cuda_time_us": 2.944, "pct_cuda_time": 0.04016454365765838, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.944, "pct_cuda_time": 0.04016454365765838, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 453.659, "cuda_time_us": 140.094, "pct_cuda_time": 1.911281107057063, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 148.28, "cuda_time_us": 84.223, "pct_cuda_time": 1.1490415626626909, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.223, "pct_cuda_time": 1.1490415626626909, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[32, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.175, "cuda_time_us": 9.664, "pct_cuda_time": 0.13184448026753076, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.664, "pct_cuda_time": 0.13184448026753076, "trace": "_C::silu_and_mul(bfloat16[32, 14336], bfloat16[32, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.072, "cuda_time_us": 46.207, "pct_cuda_time": 0.6303950641268413, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_s16816gemm_bf16_128x64_64x6_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 43.648, "pct_cuda_time": 0.5954830168374569, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- 
linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, float, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, float const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, float const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.559, "pct_cuda_time": 0.03491204728938444, "trace": "mm(bfloat16[32, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[32, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[32, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.763, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04234739929122677, "trace": "_C::fused_add_rms_norm(bfloat16[32, 4096], bfloat16[32, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 723.568, "cuda_time_us": 393.467, "pct_cuda_time": 5.3680103598328355, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 3.584, "pct_cuda_time": 0.04889596619193194, "trace": "index_select(bfloat16[32, 4096], 0, int64[32])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010041135914414596, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[32, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 389.147, "pct_cuda_time": 5.30907325772649, "trace": "mm(bfloat16[32, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[32, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[32, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 6050.411, "cuda_time_us": 161.33999999999997, "pct_cuda_time": 2.201137049499525, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.832, "pct_cuda_time": 0.01135084929455563, "trace": "copy_(bfloat16[32], bfloat16[32], True) <- _to_copy(bfloat16[32], 15, 0, None, None, True, None) <- to(bfloat16[32], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010041135914414596, "trace": "copy_(bfloat16[32], bfloat16[32], True) <- _to_copy(bfloat16[32], 15, 0, None, None, True, None) <- to(bfloat16[32], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.010914278167841952, "trace": "copy_(int32[32], int32[32], True) <- _to_copy(int32[32], 3, 0, None, None, True, None) <- 
to(int32[32], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010041135914414596, "trace": "copy_(bfloat16[32], bfloat16[32], True) <- _to_copy(bfloat16[32], 15, 0, None, None, True, None) <- to(bfloat16[32], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.010914278167841952, "trace": "copy_(bfloat16[32], bfloat16[32], True) <- _to_copy(bfloat16[32], 15, 0, None, None, True, None) <- to(bfloat16[32], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.832, "pct_cuda_time": 0.01135084929455563, "trace": "copy_(bfloat16[32], bfloat16[32], True) <- _to_copy(bfloat16[32], 15, 0, None, None, True, None) <- to(bfloat16[32], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.010914278167841952, "trace": "copy_(bfloat16[32], bfloat16[32], True) <- _to_copy(bfloat16[32], 15, 0, None, None, True, None) <- to(bfloat16[32], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 12.672, "pct_cuda_time": 0.17288216617861651, "trace": "copy_(float32[32, 128256], bfloat16[32, 128256], False) <- _to_copy(bfloat16[32, 128256], 6, None, None, None, False, None) <- to(bfloat16[32, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 20.0, "pct_cuda_time": 0.2728569541960488, "trace": "div_(float32[32, 128256], bfloat16[32, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 36.159, "pct_cuda_time": 0.49331173033874637, "trace": "_softmax(float32[32, 128256], -1, False) <- softmax(float32[32, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 32.927, "pct_cuda_time": 0.4492180465406649, "trace": "_log_softmax(float32[32, 128256], -1, False) <- log_softmax(float32[32, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, 
TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 1.952, "pct_cuda_time": 0.02663083872953436, "trace": "copy_(int64[32], int32[32], False) <- _to_copy(int32[32], 4, None, None, None, False, None) <- to(int32[32], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 20.031, "pct_cuda_time": 0.27327988247505264, "trace": "index(float32[32, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 29.599, "pct_cuda_time": 0.4038146493624424, "trace": "argmax(float32[32, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.03361597675695321, "trace": "copy_(int64[32], int64[32], False) <- _to_copy(int64[32], 4, 0, None, None, False, None) <- to(int64[32], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] } }
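
The trace above is a nested tree in which every node has an "entry" (name, cpu_time_us, cuda_time_us, pct_cuda_time, trace) and a "children" list, and the leaves are the individual CUDA kernels and memcpys. A minimal post-processing sketch follows, assuming the dump is saved as profile.json and that every node keeps that entry/children schema; the file name and the helper names iter_nodes and leaf_cuda_time_by_kernel are illustrative only and are not part of the profiler's own tooling.

    # Sketch: aggregate leaf-level CUDA time per kernel from the profile dump above.
    # Assumptions: the JSON is stored in "profile.json" and nodes follow the
    # {"entry": {...}, "children": [...]} layout visible in the trace.
    import json
    from collections import defaultdict


    def iter_nodes(obj):
        """Yield every {"entry": ..., "children": [...]} node, depth-first."""
        if isinstance(obj, dict):
            if "entry" in obj and "children" in obj:
                yield obj
            for value in obj.values():
                yield from iter_nodes(value)
        elif isinstance(obj, list):
            for item in obj:
                yield from iter_nodes(item)


    def leaf_cuda_time_by_kernel(root):
        """Sum CUDA time (us) over leaf nodes, keyed by kernel/memcpy name."""
        totals = defaultdict(float)
        for node in iter_nodes(root):
            if not node["children"]:  # leaves are the actual device-side events
                entry = node["entry"]
                totals[entry["name"]] += entry.get("cuda_time_us", 0.0)
        return totals


    if __name__ == "__main__":
        with open("profile.json") as f:
            profile = json.load(f)

        totals = leaf_cuda_time_by_kernel(profile)
        for name, us in sorted(totals.items(), key=lambda kv: kv[1], reverse=True)[:10]:
            print(f"{us:10.3f} us  {name[:80]}")

Run as a quick sanity check, this kind of aggregation makes the dominant costs in the dump easy to read off, for example the large lm-head GEMM under LogitsProcessor versus the per-layer attention and MLP kernels; summing at the leaves rather than at module nodes avoids double-counting, since each parent's cuda_time_us already includes its children.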