{ "context": { "python_version": "3.12.9 | packaged by Anaconda, Inc. | (main, Feb 6 2025, 18:56:27) [GCC 11.2.0]", "torch_version": "2.5.1+cu124", "engine_args": { "model": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "served_model_name": null, "tokenizer": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "task": "auto", "skip_tokenizer_init": false, "tokenizer_mode": "auto", "trust_remote_code": false, "allowed_local_media_path": null, "download_dir": null, "load_format": "dummy", "config_format": "auto", "dtype": "auto", "kv_cache_dtype": "auto", "seed": 0, "max_model_len": null, "distributed_executor_backend": null, "pipeline_parallel_size": 1, "tensor_parallel_size": 1, "max_parallel_loading_workers": null, "block_size": null, "enable_prefix_caching": false, "disable_sliding_window": false, "use_v2_block_manager": true, "swap_space": 4, "cpu_offload_gb": 0, "gpu_memory_utilization": 0.9, "max_num_batched_tokens": 8000, "max_num_partial_prefills": 1, "max_long_partial_prefills": 1, "long_prefill_token_threshold": 0, "max_num_seqs": 256, "max_logprobs": 20, "disable_log_stats": false, "revision": null, "code_revision": null, "rope_scaling": null, "rope_theta": null, "hf_overrides": null, "tokenizer_revision": null, "quantization": null, "enforce_eager": true, "max_seq_len_to_capture": 8192, "disable_custom_all_reduce": false, "tokenizer_pool_size": 0, "tokenizer_pool_type": "ray", "tokenizer_pool_extra_config": null, "limit_mm_per_prompt": null, "mm_processor_kwargs": null, "disable_mm_preprocessor_cache": false, "enable_lora": false, "enable_lora_bias": false, "max_loras": 1, "max_lora_rank": 16, "enable_prompt_adapter": false, "max_prompt_adapters": 1, "max_prompt_adapter_token": 0, "fully_sharded_loras": false, "lora_extra_vocab_size": 256, "long_lora_scaling_factors": null, "lora_dtype": "auto", "max_cpu_loras": null, "device": "auto", "num_scheduler_steps": 1, "multi_step_stream_outputs": true, "ray_workers_use_nsight": false, "num_gpu_blocks_override": null, "num_lookahead_slots": 0, "model_loader_extra_config": null, "ignore_patterns": [], "preemption_mode": null, "scheduler_delay_factor": 0.0, "enable_chunked_prefill": null, "guided_decoding_backend": "xgrammar", "logits_processor_pattern": null, "speculative_model": null, "speculative_model_quantization": null, "speculative_draft_tensor_parallel_size": null, "num_speculative_tokens": null, "speculative_disable_mqa_scorer": false, "speculative_max_model_len": null, "speculative_disable_by_batch_size": null, "ngram_prompt_lookup_max": null, "ngram_prompt_lookup_min": null, "spec_decoding_acceptance_method": "rejection_sampler", "typical_acceptance_sampler_posterior_threshold": null, "typical_acceptance_sampler_posterior_alpha": null, "qlora_adapter_name_or_path": null, "disable_logprobs_during_spec_decoding": null, "otlp_traces_endpoint": null, "collect_detailed_traces": null, "disable_async_output_proc": false, "scheduling_policy": "fcfs", "scheduler_cls": "vllm.core.scheduler.Scheduler", "override_neuron_config": null, "override_pooler_config": null, "compilation_config": null, "worker_cls": "auto", "kv_transfer_config": null, "generation_config": null, "override_generation_config": null, "enable_sleep_mode": false, "model_impl": "auto", "calculate_kv_scales": false, "additional_config": null }, "prompt_len": 0, "batch_size": 8, "num_steps": 2, "complete_num_requests_per_step": null, "save_chrome_traces_folder": null }, "prefill": { "metadata": { "num_running_seqs": null }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", 
"cuda_time_us": 43659.561, "pct_cuda_time": 98.90983322016886, "invocations": 1 }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 57.343, "pct_cuda_time": 0.1299093815062443, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cuda_time_us": 57.343, "pct_cuda_time": 0.1299093815062443, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 43582.826, "pct_cuda_time": 98.73599166339852, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 1283.665, "pct_cuda_time": 2.908116530547984, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 25.696, "pct_cuda_time": 0.05821375699186392, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 1257.9689999999998, "pct_cuda_time": 2.8499027735561193, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 10348.571000000002, "pct_cuda_time": 23.444473747161044, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 4603.5199999999995, "pct_cuda_time": 10.429179428206155, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 23.84000000000001, "pct_cuda_time": 0.054009027346125324, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 4579.679999999999, "pct_cuda_time": 10.375170400860029, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 797.4649999999998, "pct_cuda_time": 1.8066404778765859, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 797.4649999999998, "pct_cuda_time": 1.8066404778765859, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 1411.28, "pct_cuda_time": 3.1972256758825384, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 359.67600000000004, "pct_cuda_time": 0.8148385452913157, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 1007.2510000000001, "pct_cuda_time": 2.281906325646479, "invocations": 32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 44.35300000000001, "pct_cuda_time": 0.10048080494474397, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 3536.306, "pct_cuda_time": 8.011428165195762, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 24.736000000000015, "pct_cuda_time": 0.05603889683027501, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 3511.57, "pct_cuda_time": 7.955389268365488, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 31950.590000000004, "pct_cuda_time": 72.38340138568951, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 19854.49100000001, "pct_cuda_time": 44.97993906721473, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 23.712000000000003, "pct_cuda_time": 0.05371904599124678, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 19830.779, "pct_cuda_time": 44.926220021223465, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 2830.939, "pct_cuda_time": 6.4134338031129445, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 2830.939, "pct_cuda_time": 6.4134338031129445, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 9265.160000000002, "pct_cuda_time": 20.990028515361843, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 9265.160000000002, "pct_cuda_time": 20.990028515361843, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 19.392, "pct_cuda_time": 0.043932175264096554, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 19.392, "pct_cuda_time": 0.043932175264096554, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": 
"LogitsProcessor", "cuda_time_us": 358.843, "pct_cuda_time": 0.8129514010052703, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 5.792, "pct_cuda_time": 0.013121656308253262, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "invocations": 1 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 352.315, "pct_cuda_time": 0.7981623519064655, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 122.36500000000001, "pct_cuda_time": 0.27721537882586506, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 5.374999999999999, "pct_cuda_time": 0.012176951425563064, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 4.831, "pct_cuda_time": 0.010944530667329335, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 6.4, "pct_cuda_time": 0.014499067743926257, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 35.488, "pct_cuda_time": 0.08039733064007108, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 28.511, "pct_cuda_time": 0.06459108131985648, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 1.856, "pct_cuda_time": 0.004204729645738614, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, 
c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cuda_time_us": 7.616, "pct_cuda_time": 0.017253890615272244, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 29.12, "pct_cuda_time": 0.06597075823486447, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 3.168, "pct_cuda_time": 0.007177038533243497, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 85594.608, "cuda_time_us": 43659.561, "pct_cuda_time": 98.90983322016886, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 294.704, "cuda_time_us": 57.343, "pct_cuda_time": 0.1299093815062443, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 57.343, "pct_cuda_time": 0.1299093815062443, "trace": "index_select(bfloat16[128256, 4096], 0, int64[2048]) <- embedding(bfloat16[128256, 4096], int64[2048], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 4033.827, "cuda_time_us": 1369.04, "pct_cuda_time": 3.101531828772625, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 223.505, "cuda_time_us": 25.696, "pct_cuda_time": 0.05821375699186392, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 25.696, "pct_cuda_time": 0.05821375699186392, "trace": "_C::rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2981.635, "cuda_time_us": 326.20399999999995, "pct_cuda_time": 0.7390084209905812, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 418.573, "cuda_time_us": 146.846, "pct_cuda_time": 0.33267657842571796, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.832, "pct_cuda_time": 0.001884878806710413, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 146.014, "pct_cuda_time": 0.33079169961900756, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) 
<- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 985.874, "cuda_time_us": 25.184, "pct_cuda_time": 0.05705383157234982, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 25.184, "pct_cuda_time": 0.05705383157234982, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1022.561, "cuda_time_us": 43.67999999999999, "pct_cuda_time": 0.09895613735229668, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.296, "pct_cuda_time": 0.025590854568029838, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 30.944, "pct_cuda_time": 0.07010299254188344, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.0032622902423834073, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], 
None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 301.251, "cuda_time_us": 110.494, "pct_cuda_time": 0.2503218736402168, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.758, "pct_cuda_time": 0.24865448084966532, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 119.64, "cuda_time_us": 19.84, "pct_cuda_time": 0.04494711000617139, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.84, "pct_cuda_time": 0.04494711000617139, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 582.486, "cuda_time_us": 997.3, "pct_cuda_time": 2.2593625407840086, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 201.294, "cuda_time_us": 620.472, "pct_cuda_time": 1.4056664939389705, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 619.736, "pct_cuda_time": 1.403999101148419, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 136.144, "cuda_time_us": 87.999, "pct_cuda_time": 0.199359915999651, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 87.999, "pct_cuda_time": 0.199359915999651, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 166.839, "cuda_time_us": 288.829, "pct_cuda_time": 0.6543361308453869, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.829, "pct_cuda_time": 0.6543361308453869, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2679.129, "cuda_time_us": 1356.463, "pct_cuda_time": 3.073038895176475, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.116, "cuda_time_us": 19.68, "pct_cuda_time": 0.04458463331257324, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.68, "pct_cuda_time": 0.04458463331257324, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1948.098, "cuda_time_us": 321.91600000000005, "pct_cuda_time": 0.7292940456021508, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 169.027, "cuda_time_us": 143.10100000000003, "pct_cuda_time": 0.32419235831618615, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.001665127311216531, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.366, "pct_cuda_time": 0.3225272310049696, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 582.888, "cuda_time_us": 25.152, "pct_cuda_time": 0.056981336233630185, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 25.152, "pct_cuda_time": 0.056981336233630185, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 817.378, "cuda_time_us": 43.84, "pct_cuda_time": 0.09931861404589486, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.264, "pct_cuda_time": 0.025518359229310207, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], 
bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.296, "pct_cuda_time": 0.07090044126779939, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.002899813548785251, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 210.218, "cuda_time_us": 109.82300000000001, "pct_cuda_time": 0.24880173700643957, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.087, "pct_cuda_time": 0.24713434421588804, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.517, "cuda_time_us": 19.808, "pct_cuda_time": 0.04487461466745176, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.808, "pct_cuda_time": 0.04487461466745176, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 480.256, "cuda_time_us": 995.059, "pct_cuda_time": 2.254285601594299, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.127, "cuda_time_us": 619.128, "pct_cuda_time": 1.4026216897127461, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 618.392, "pct_cuda_time": 1.4009542969221946, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 105.877, "cuda_time_us": 87.871, "pct_cuda_time": 0.1990699346447725, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 87.871, "pct_cuda_time": 0.1990699346447725, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 152.184, "cuda_time_us": 288.06, "pct_cuda_time": 0.6525939772367808, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.06, "pct_cuda_time": 0.6525939772367808, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2509.871, "cuda_time_us": 1360.558, "pct_cuda_time": 3.0823160330532526, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.982, "cuda_time_us": 20.255, "pct_cuda_time": 0.045887283930191605, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.255, "pct_cuda_time": 0.045887283930191605, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1809.29, "cuda_time_us": 322.39500000000004, "pct_cuda_time": 0.7303792102036103, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 159.7, "cuda_time_us": 143.102, 
"pct_cuda_time": 0.3241946237955211, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.366, "pct_cuda_time": 0.3225272310049696, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 508.836, "cuda_time_us": 24.895, "pct_cuda_time": 0.05639910804453815, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.895, "pct_cuda_time": 0.05639910804453815, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 757.644, "cuda_time_us": 43.968, "pct_cuda_time": 0.09960859540077338, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.136, "pct_cuda_time": 0.025228377874431683, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.552, "pct_cuda_time": 0.07148040397755644, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) 
<- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.002899813548785251, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 211.155, "cuda_time_us": 110.43, "pct_cuda_time": 0.25017688296277757, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.662, "pct_cuda_time": 0.24843699483350642, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.914, "cuda_time_us": 20.288, "pct_cuda_time": 0.04596204474824623, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.288, "pct_cuda_time": 0.04596204474824623, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 460.434, "cuda_time_us": 997.62, "pct_cuda_time": 2.260087494171205, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.693, "cuda_time_us": 620.121, "pct_cuda_time": 1.4048713106923896, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0016696582698865078, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 619.384, "pct_cuda_time": 1.403201652422503, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": 
"SiluAndMul", "cpu_time_us": 102.084, "cuda_time_us": 88.575, "pct_cuda_time": 0.20066483209660438, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.575, "pct_cuda_time": 0.20066483209660438, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.594, "cuda_time_us": 288.924, "pct_cuda_time": 0.6545513513822109, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.924, "pct_cuda_time": 0.6545513513822109, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2488.424, "cuda_time_us": 1358.417, "pct_cuda_time": 3.0774656417970423, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.789, "cuda_time_us": 19.68, "pct_cuda_time": 0.04458463331257324, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.68, "pct_cuda_time": 0.04458463331257324, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1769.936, "cuda_time_us": 321.726, "pct_cuda_time": 0.7288636045285029, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 153.254, "cuda_time_us": 143.968, "pct_cuda_time": 0.3261565288996211, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0016696582698865078, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 143.231, "pct_cuda_time": 0.3244868706297346, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 490.67, "cuda_time_us": 24.416, "pct_cuda_time": 0.05531394344307866, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.416, "pct_cuda_time": 0.05531394344307866, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 754.426, "cuda_time_us": 43.744, "pct_cuda_time": 0.09910112802973595, "trace": "" }, "children": [ { 
"entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 10.944, "pct_cuda_time": 0.024793405842113897, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.328, "pct_cuda_time": 0.07097293660651902, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0033347855811030384, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 217.843, "cuda_time_us": 109.598, "pct_cuda_time": 0.24829200415606717, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.0017376226499361623, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 108.831, "pct_cuda_time": 0.246554381506131, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.977, "cuda_time_us": 20.0, "pct_cuda_time": 0.04530958669976955, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.0, "pct_cuda_time": 0.04530958669976955, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 476.634, "cuda_time_us": 997.011, "pct_cuda_time": 2.2587078172561967, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 168.812, "cuda_time_us": 619.864, "pct_cuda_time": 1.4042890825032976, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 619.128, "pct_cuda_time": 1.4026216897127461, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.217, "cuda_time_us": 88.254, "pct_cuda_time": 0.19993761323007309, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.254, "pct_cuda_time": 0.19993761323007309, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.084, "cuda_time_us": 288.893, "pct_cuda_time": 0.6544811215228261, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.893, "pct_cuda_time": 0.6544811215228261, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2527.723, "cuda_time_us": 1355.182, "pct_cuda_time": 3.070136816148355, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.747, "cuda_time_us": 19.551, "pct_cuda_time": 0.044292386478359716, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type 
vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.551, "pct_cuda_time": 0.044292386478359716, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1834.056, "cuda_time_us": 321.532, "pct_cuda_time": 0.7284241015375151, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 154.444, "cuda_time_us": 143.166, "pct_cuda_time": 0.3243396144729604, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.43, "pct_cuda_time": 0.3226722216824089, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 564.995, "cuda_time_us": 24.672, "pct_cuda_time": 0.05589390615283571, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.672, "pct_cuda_time": 0.05589390615283571, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 750.287, "cuda_time_us": 44.192, "pct_cuda_time": 0.1001160627718108, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.201, "pct_cuda_time": 0.025375634031205935, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, 
cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.519, "pct_cuda_time": 0.07140564315950182, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0033347855811030384, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 200.393, "cuda_time_us": 109.502, "pct_cuda_time": 0.24807451813990822, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 108.734, "pct_cuda_time": 0.2463346300106371, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.594, "cuda_time_us": 19.744, "pct_cuda_time": 0.0447296239900125, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.744, "pct_cuda_time": 0.0447296239900125, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 461.155, "cuda_time_us": 994.355, "pct_cuda_time": 2.2526907041424673, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.916, "cuda_time_us": 617.144, "pct_cuda_time": 1.3981269787121289, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 
28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 616.408, "pct_cuda_time": 1.3964595859215774, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.665, "cuda_time_us": 88.286, "pct_cuda_time": 0.2000101085687927, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.286, "pct_cuda_time": 0.2000101085687927, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.183, "cuda_time_us": 288.925, "pct_cuda_time": 0.6545536168615459, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.925, "pct_cuda_time": 0.6545536168615459, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2651.877, "cuda_time_us": 1358.445, "pct_cuda_time": 3.0775290752184223, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.673, "cuda_time_us": 19.359, "pct_cuda_time": 0.04385741444604194, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.359, "pct_cuda_time": 0.04385741444604194, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1902.606, "cuda_time_us": 323.419, "pct_cuda_time": 0.7326990610426384, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.026, "cuda_time_us": 143.358, "pct_cuda_time": 0.32477458650527813, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.622, "pct_cuda_time": 0.3231071937147267, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 658.382, "cuda_time_us": 24.767, 
"pct_cuda_time": 0.056109126689659616, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.767, "pct_cuda_time": 0.056109126689659616, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 729.271, "cuda_time_us": 43.679, "pct_cuda_time": 0.09895387187296172, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.232, "pct_cuda_time": 0.025445863890590576, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.167, "pct_cuda_time": 0.07060819443358587, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.002899813548785251, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], 
bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 205.988, "cuda_time_us": 111.615, "pct_cuda_time": 0.2528614759747389, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.792, "pct_cuda_time": 0.004059738968299352, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.823, "pct_cuda_time": 0.24880173700643954, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.113, "cuda_time_us": 20.352, "pct_cuda_time": 0.04610703542568549, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.352, "pct_cuda_time": 0.04610703542568549, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 495.541, "cuda_time_us": 995.315, "pct_cuda_time": 2.2548655643040565, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 195.515, "cuda_time_us": 618.104, "pct_cuda_time": 1.4003018388737178, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 617.368, "pct_cuda_time": 1.3986344460831663, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.382, "cuda_time_us": 88.479, "pct_cuda_time": 0.2004473460804455, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.479, "pct_cuda_time": 0.2004473460804455, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 148.4, "cuda_time_us": 288.732, "pct_cuda_time": 0.6541163793498932, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.732, "pct_cuda_time": 0.6541163793498932, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) 
<- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2501.842, "cuda_time_us": 1360.524, "pct_cuda_time": 3.082239006755863, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.278, "cuda_time_us": 19.584, "pct_cuda_time": 0.04436714729641434, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.584, "pct_cuda_time": 0.04436714729641434, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1793.03, "cuda_time_us": 322.009, "pct_cuda_time": 0.7295047351803047, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 150.63, "cuda_time_us": 143.292, "pct_cuda_time": 0.32462506486916887, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.001665127311216531, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.557, "pct_cuda_time": 0.3229599375579523, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 515.958, "cuda_time_us": 24.672, "pct_cuda_time": 0.05589390615283571, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.672, "pct_cuda_time": 0.05589390615283571, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 752.824, "cuda_time_us": 43.839, "pct_cuda_time": 0.09931634856655985, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.104, "pct_cuda_time": 0.025155882535712052, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, 
true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.327, "pct_cuda_time": 0.07097067112718404, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.408, "pct_cuda_time": 0.003189794903663776, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 218.785, "cuda_time_us": 110.206, "pct_cuda_time": 0.24966941559174016, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.47, "pct_cuda_time": 0.2480020228011886, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.362, "cuda_time_us": 20.512, "pct_cuda_time": 0.04646951211928365, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.512, "pct_cuda_time": 0.04646951211928365, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], 
bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 478.248, "cuda_time_us": 998.4189999999999, "pct_cuda_time": 2.26189761215986, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.763, "cuda_time_us": 621.3679999999999, "pct_cuda_time": 1.40769636342312, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 620.632, "pct_cuda_time": 1.4060289706325686, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.137, "cuda_time_us": 88.639, "pct_cuda_time": 0.20080982277404366, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.639, "pct_cuda_time": 0.20080982277404366, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.243, "cuda_time_us": 288.412, "pct_cuda_time": 0.6533914259626966, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.412, "pct_cuda_time": 0.6533914259626966, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2538.153, "cuda_time_us": 1360.3020000000001, "pct_cuda_time": 3.081736070343496, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.298, "cuda_time_us": 19.136, "pct_cuda_time": 0.0433522125543395, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.136, "pct_cuda_time": 0.0433522125543395, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1844.516, "cuda_time_us": 321.94899999999996, "pct_cuda_time": 0.7293688064202052, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 160.66, "cuda_time_us": 142.91, "pct_cuda_time": 0.32375965176320326, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- 
linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.174, "pct_cuda_time": 0.3220922589726518, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 545.893, "cuda_time_us": 25.247, "pct_cuda_time": 0.05719655677045409, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 25.247, "pct_cuda_time": 0.05719655677045409, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 775.186, "cuda_time_us": 43.839999999999996, "pct_cuda_time": 0.09931861404589484, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.008, "pct_cuda_time": 0.024938396519553156, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.392, "pct_cuda_time": 0.07111792728395828, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, 
at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.0032622902423834073, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 210.339, "cuda_time_us": 109.952, "pct_cuda_time": 0.24909398384065307, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0016696582698865078, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.215, "pct_cuda_time": 0.24742432557076657, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.198, "cuda_time_us": 20.479, "pct_cuda_time": 0.046394751301229026, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.479, "pct_cuda_time": 0.046394751301229026, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 464.947, "cuda_time_us": 998.738, "pct_cuda_time": 2.262620300067722, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.816, "cuda_time_us": 620.695, "pct_cuda_time": 1.4061716958306731, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.001665127311216531, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 619.96, "pct_cuda_time": 1.4045065685194564, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.612, "cuda_time_us": 88.639, "pct_cuda_time": 0.20080982277404366, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 
88.639, "pct_cuda_time": 0.20080982277404366, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.056, "cuda_time_us": 289.404, "pct_cuda_time": 0.6556387814630052, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 289.404, "pct_cuda_time": 0.6556387814630052, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2622.492, "cuda_time_us": 1354.6070000000002, "pct_cuda_time": 3.068834165530737, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.94, "cuda_time_us": 19.264, "pct_cuda_time": 0.04364219390921802, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.264, "pct_cuda_time": 0.04364219390921802, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1814.083, "cuda_time_us": 320.829, "pct_cuda_time": 0.7268314695650182, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 178.132, "cuda_time_us": 143.391, "pct_cuda_time": 0.32484934732333276, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0016696582698865078, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.654, "pct_cuda_time": 0.32317968905344624, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 509.858, "cuda_time_us": 24.32, "pct_cuda_time": 0.05509645742691978, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.32, "pct_cuda_time": 0.05509645742691978, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 755.063, "cuda_time_us": 43.52, "pct_cuda_time": 0.09859366065869855, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", 
"cpu_time_us": 0, "cuda_time_us": 11.072, "pct_cuda_time": 0.02508338719699242, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 30.976, "pct_cuda_time": 0.07017548788060307, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0033347855811030384, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 217.206, "cuda_time_us": 109.598, "pct_cuda_time": 0.24829200415606717, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 108.862, "pct_cuda_time": 0.2466246113655156, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], 
bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.632, "cuda_time_us": 20.32, "pct_cuda_time": 0.046034540086965864, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.32, "pct_cuda_time": 0.046034540086965864, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 573.144, "cuda_time_us": 994.1940000000002, "pct_cuda_time": 2.2523259619695346, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 168.355, "cuda_time_us": 618.8710000000001, "pct_cuda_time": 1.4020394615236542, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.0017376226499361623, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 618.104, "pct_cuda_time": 1.4003018388737178, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 104.082, "cuda_time_us": 88.191, "pct_cuda_time": 0.19979488803196882, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.191, "pct_cuda_time": 0.19979488803196882, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 243.309, "cuda_time_us": 287.132, "pct_cuda_time": 0.6504916124139115, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 287.132, "pct_cuda_time": 0.6504916124139115, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2456.042, "cuda_time_us": 1358.991, "pct_cuda_time": 3.078766026935326, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.527, "cuda_time_us": 20.096, "pct_cuda_time": 0.04552707271592844, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.096, "pct_cuda_time": 0.04552707271592844, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], 
bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1772.201, "cuda_time_us": 323.26, "pct_cuda_time": 0.7323388498283752, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 160.404, "cuda_time_us": 143.486, "pct_cuda_time": 0.32506456786015664, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.75, "pct_cuda_time": 0.32339717506960514, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 527.612, "cuda_time_us": 24.896, "pct_cuda_time": 0.05640137352387314, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.896, "pct_cuda_time": 0.05640137352387314, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 738.951, "cuda_time_us": 43.936, "pct_cuda_time": 0.09953610006205374, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.296, "pct_cuda_time": 0.025590854568029838, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.264, 
"pct_cuda_time": 0.07082794592907976, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.376, "pct_cuda_time": 0.0031172995649441444, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 198.12, "cuda_time_us": 110.94200000000001, "pct_cuda_time": 0.25133680838229167, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 110.206, "pct_cuda_time": 0.24966941559174016, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.253, "cuda_time_us": 20.608, "pct_cuda_time": 0.04668699813544254, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.608, "pct_cuda_time": 0.04668699813544254, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 458.061, "cuda_time_us": 995.0269999999999, "pct_cuda_time": 2.25421310625558, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 161.906, "cuda_time_us": 619.8629999999999, "pct_cuda_time": 1.4042868170239624, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 619.127, "pct_cuda_time": 1.402619424233411, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.499, "cuda_time_us": 87.519, "pct_cuda_time": 0.19827248591885657, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 87.519, "pct_cuda_time": 0.19827248591885657, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.494, "cuda_time_us": 287.645, "pct_cuda_time": 0.6516538033127606, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 287.645, "pct_cuda_time": 0.6516538033127606, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2393.958, "cuda_time_us": 1361.519, "pct_cuda_time": 3.084493158694177, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.665, "cuda_time_us": 19.872, "pct_cuda_time": 0.04501960534489102, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.872, "pct_cuda_time": 0.04501960534489102, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1723.62, "cuda_time_us": 322.621, "pct_cuda_time": 0.7308912085333176, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 150.26, "cuda_time_us": 143.102, "pct_cuda_time": 0.3241946237955211, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.366, "pct_cuda_time": 0.3225272310049696, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 473.438, "cuda_time_us": 25.088, "pct_cuda_time": 0.05683634555619093, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 
const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 25.088, "pct_cuda_time": 0.05683634555619093, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 762.171, "cuda_time_us": 43.775999999999996, "pct_cuda_time": 0.09917362336845559, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.136, "pct_cuda_time": 0.025228377874431683, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.36, "pct_cuda_time": 0.07104543194523866, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.002899813548785251, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 192.783, "cuda_time_us": 
110.655, "pct_cuda_time": 0.25068661581314994, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.919, "pct_cuda_time": 0.24901922302259846, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.373, "cuda_time_us": 20.448, "pct_cuda_time": 0.04632452144184439, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.448, "pct_cuda_time": 0.04632452144184439, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 454.32, "cuda_time_us": 998.578, "pct_cuda_time": 2.262257823374124, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.374, "cuda_time_us": 621.015, "pct_cuda_time": 1.4068966492178692, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.001665127311216531, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 620.28, "pct_cuda_time": 1.4052315219066527, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.546, "cuda_time_us": 88.351, "pct_cuda_time": 0.20015736472556697, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.351, "pct_cuda_time": 0.20015736472556697, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.645, "cuda_time_us": 289.212, "pct_cuda_time": 0.6552038094306876, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 289.212, "pct_cuda_time": 0.6552038094306876, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { 
"name": "LlamaDecoderLayer", "cpu_time_us": 2586.062, "cuda_time_us": 1357.7420000000002, "pct_cuda_time": 3.0759364432459257, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.506, "cuda_time_us": 19.488, "pct_cuda_time": 0.044149661280255444, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.488, "pct_cuda_time": 0.044149661280255444, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1882.406, "cuda_time_us": 321.11499999999995, "pct_cuda_time": 0.7274793966548249, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 158.785, "cuda_time_us": 143.486, "pct_cuda_time": 0.32506456786015664, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.75, "pct_cuda_time": 0.32339717506960514, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 515.313, "cuda_time_us": 24.128, "pct_cuda_time": 0.054661485394601986, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.128, "pct_cuda_time": 0.054661485394601986, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 766.923, "cuda_time_us": 43.647, "pct_cuda_time": 0.09888137653424207, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.264, "pct_cuda_time": 0.025518359229310207, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, 
cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 30.911, "pct_cuda_time": 0.07002823172382883, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0033347855811030384, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 202.512, "cuda_time_us": 109.854, "pct_cuda_time": 0.2488719668658242, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.118, "pct_cuda_time": 0.24720457407527266, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.301, "cuda_time_us": 20.288, "pct_cuda_time": 0.04596204474824623, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.288, "pct_cuda_time": 0.04596204474824623, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 456.606, "cuda_time_us": 
996.8510000000001, "pct_cuda_time": 2.2583453405625993, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 161.095, "cuda_time_us": 619.864, "pct_cuda_time": 1.4042890825032976, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 619.128, "pct_cuda_time": 1.4026216897127461, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.417, "cuda_time_us": 88.255, "pct_cuda_time": 0.19993987870940808, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.255, "pct_cuda_time": 0.19993987870940808, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.35, "cuda_time_us": 288.732, "pct_cuda_time": 0.6541163793498932, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.732, "pct_cuda_time": 0.6541163793498932, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 3593.79, "cuda_time_us": 1361.325, "pct_cuda_time": 3.084053655703189, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.31, "cuda_time_us": 19.872, "pct_cuda_time": 0.04501960534489102, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.872, "pct_cuda_time": 0.04501960534489102, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2806.197, "cuda_time_us": 325.628, "pct_cuda_time": 0.7377035048936279, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 781.476, "cuda_time_us": 146.878, "pct_cuda_time": 0.3327490737644376, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 146.142, "pct_cuda_time": 0.331081680973886, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 543.617, "cuda_time_us": 24.991, "pct_cuda_time": 0.05661659406069704, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.991, "pct_cuda_time": 0.05661659406069704, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1070.483, "cuda_time_us": 43.84, "pct_cuda_time": 0.09931861404589486, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.04, "pct_cuda_time": 0.025010891858272787, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.264, "pct_cuda_time": 0.07082794592907976, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.536, "pct_cuda_time": 
0.0034797762585423017, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 229.609, "cuda_time_us": 109.919, "pct_cuda_time": 0.24901922302259846, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.151, "pct_cuda_time": 0.2472793348933273, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 104.607, "cuda_time_us": 20.191, "pct_cuda_time": 0.04574229325275234, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.191, "pct_cuda_time": 0.04574229325275234, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 536.273, "cuda_time_us": 995.634, "pct_cuda_time": 2.2555882522119175, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 194.263, "cuda_time_us": 618.3910000000001, "pct_cuda_time": 1.4009520314428596, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 617.623, "pct_cuda_time": 1.3992121433135885, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 120.076, "cuda_time_us": 88.447, "pct_cuda_time": 0.2003748507417259, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.447, "pct_cuda_time": 0.2003748507417259, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], 
bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 158.119, "cuda_time_us": 288.796, "pct_cuda_time": 0.6542613700273323, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.796, "pct_cuda_time": 0.6542613700273323, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2565.568, "cuda_time_us": 1358.863, "pct_cuda_time": 3.0784760455804476, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.974, "cuda_time_us": 19.52, "pct_cuda_time": 0.04422215661897508, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.52, "pct_cuda_time": 0.04422215661897508, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1862.692, "cuda_time_us": 321.629, "pct_cuda_time": 0.728643853033009, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 153.829, "cuda_time_us": 142.942, "pct_cuda_time": 0.32383214710192293, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.174, "pct_cuda_time": 0.3220922589726518, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 574.478, "cuda_time_us": 24.544, "pct_cuda_time": 0.05560392479795719, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.544, "pct_cuda_time": 0.05560392479795719, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 774.563, "cuda_time_us": 44.096, "pct_cuda_time": 0.09989857675565189, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.328, "pct_cuda_time": 0.025663349906749473, "trace": 
"_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.52, "pct_cuda_time": 0.07140790863883681, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.00282731821006562, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 193.939, "cuda_time_us": 110.04700000000001, "pct_cuda_time": 0.249309204377477, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.311, "pct_cuda_time": 0.2476418115869255, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, 
"children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.523, "cuda_time_us": 20.128, "pct_cuda_time": 0.04559956805464807, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.128, "pct_cuda_time": 0.04559956805464807, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 464.482, "cuda_time_us": 997.586, "pct_cuda_time": 2.260010467873815, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.001, "cuda_time_us": 620.727, "pct_cuda_time": 1.4062441911693926, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.001665127311216531, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 619.992, "pct_cuda_time": 1.4045790638581759, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.827, "cuda_time_us": 88.095, "pct_cuda_time": 0.19957740201580992, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.095, "pct_cuda_time": 0.19957740201580992, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 148.076, "cuda_time_us": 288.764, "pct_cuda_time": 0.6541888746886126, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.764, "pct_cuda_time": 0.6541888746886126, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2473.592, "cuda_time_us": 1354.734, "pct_cuda_time": 3.06912188140628, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.313, "cuda_time_us": 19.424, "pct_cuda_time": 0.04400467060281619, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.424, "pct_cuda_time": 0.04400467060281619, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1741.427, 
"cuda_time_us": 322.619, "pct_cuda_time": 0.7308866775746476, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 149.598, "cuda_time_us": 143.358, "pct_cuda_time": 0.32477458650527813, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.622, "pct_cuda_time": 0.3231071937147267, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 496.141, "cuda_time_us": 24.735, "pct_cuda_time": 0.05603663135093998, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.735, "pct_cuda_time": 0.05603663135093998, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 755.779, "cuda_time_us": 43.871, "pct_cuda_time": 0.09938884390527951, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.039, "pct_cuda_time": 0.025008626378937804, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.552, "pct_cuda_time": 0.07148040397755644, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], 
bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.002899813548785251, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 186.495, "cuda_time_us": 110.655, "pct_cuda_time": 0.25068661581314994, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.919, "pct_cuda_time": 0.24901922302259846, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 106.422, "cuda_time_us": 20.832, "pct_cuda_time": 0.04719446550647996, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.832, "pct_cuda_time": 0.04719446550647996, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 472.7, "cuda_time_us": 991.8589999999999, "pct_cuda_time": 2.2470360677223358, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.887, "cuda_time_us": 615.736, "pct_cuda_time": 1.394937183808465, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 615.0, "pct_cuda_time": 1.3932697910179137, 
"trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.894, "cuda_time_us": 87.679, "pct_cuda_time": 0.19863496261245472, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 87.679, "pct_cuda_time": 0.19863496261245472, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.669, "cuda_time_us": 288.444, "pct_cuda_time": 0.6534639213014165, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.444, "pct_cuda_time": 0.6534639213014165, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2502.817, "cuda_time_us": 1359.758, "pct_cuda_time": 3.0805036495852622, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.169, "cuda_time_us": 19.456, "pct_cuda_time": 0.044077165941535816, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.456, "pct_cuda_time": 0.044077165941535816, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1786.162, "cuda_time_us": 321.562, "pct_cuda_time": 0.7284920659175649, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 148.411, "cuda_time_us": 142.141, "pct_cuda_time": 0.3220174981545971, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.0017376226499361623, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 141.374, "pct_cuda_time": 0.320279875504661, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 500.496, "cuda_time_us": 24.416, "pct_cuda_time": 0.05531394344307866, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.416, "pct_cuda_time": 0.05531394344307866, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], 
bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 756.543, "cuda_time_us": 44.382, "pct_cuda_time": 0.1005465038454586, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.103, "pct_cuda_time": 0.025153617056377062, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.807, "pct_cuda_time": 0.0720581012079785, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0033347855811030384, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 215.587, "cuda_time_us": 110.623, "pct_cuda_time": 0.2506141204744303, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": 
"mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.887, "pct_cuda_time": 0.2489467276838788, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.603, "cuda_time_us": 20.096, "pct_cuda_time": 0.04552707271592844, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.096, "pct_cuda_time": 0.04552707271592844, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 480.667, "cuda_time_us": 998.644, "pct_cuda_time": 2.2624073450102333, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.689, "cuda_time_us": 620.441, "pct_cuda_time": 1.4055962640795858, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.769, "pct_cuda_time": 0.0017421536086061392, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 619.672, "pct_cuda_time": 1.4038541104709799, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.571, "cuda_time_us": 88.447, "pct_cuda_time": 0.2003748507417259, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.447, "pct_cuda_time": 0.2003748507417259, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 147.146, "cuda_time_us": 289.756, "pct_cuda_time": 0.6564362301889213, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 289.756, "pct_cuda_time": 0.6564362301889213, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2564.528, "cuda_time_us": 1356.943, "pct_cuda_time": 3.0741263252572693, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", 
"cpu_time_us": 96.293, "cuda_time_us": 19.68, "pct_cuda_time": 0.04458463331257324, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.68, "pct_cuda_time": 0.04458463331257324, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1833.968, "cuda_time_us": 321.372, "pct_cuda_time": 0.728061624843917, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 160.14, "cuda_time_us": 143.678, "pct_cuda_time": 0.32549953989247443, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.942, "pct_cuda_time": 0.32383214710192293, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 551.119, "cuda_time_us": 24.608, "pct_cuda_time": 0.055748915475396454, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.608, "pct_cuda_time": 0.055748915475396454, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 759.538, "cuda_time_us": 43.679, "pct_cuda_time": 0.09895387187296172, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.104, "pct_cuda_time": 0.025155882535712052, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, 
cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.135, "pct_cuda_time": 0.07053569909486625, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.0032622902423834073, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 202.893, "cuda_time_us": 109.40700000000001, "pct_cuda_time": 0.24785929760308437, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 108.671, "pct_cuda_time": 0.24619190481253284, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.065, "cuda_time_us": 20.128, "pct_cuda_time": 0.04559956805464807, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.128, "pct_cuda_time": 0.04559956805464807, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 470.375, "cuda_time_us": 995.7629999999999, "pct_cuda_time": 2.255880499046131, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 166.252, "cuda_time_us": 618.231, 
"pct_cuda_time": 1.4005895547492613, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 617.495, "pct_cuda_time": 1.3989221619587098, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.332, "cuda_time_us": 88.511, "pct_cuda_time": 0.20051984141916512, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.511, "pct_cuda_time": 0.20051984141916512, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.58, "cuda_time_us": 289.021, "pct_cuda_time": 0.6547711028777048, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 289.021, "pct_cuda_time": 0.6547711028777048, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2418.08, "cuda_time_us": 1359.054, "pct_cuda_time": 3.0789087521334304, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.572, "cuda_time_us": 19.584, "pct_cuda_time": 0.04436714729641434, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.584, "pct_cuda_time": 0.04436714729641434, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1682.229, "cuda_time_us": 321.916, "pct_cuda_time": 0.7292940456021506, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 154.824, "cuda_time_us": 143.038, "pct_cuda_time": 0.3240496331180819, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.27, "pct_cuda_time": 0.3223097449888107, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- 
matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 476.474, "cuda_time_us": 25.28, "pct_cuda_time": 0.05727131758850871, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 25.28, "pct_cuda_time": 0.05727131758850871, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 716.749, "cuda_time_us": 43.903999999999996, "pct_cuda_time": 0.09946360472333411, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.52, "pct_cuda_time": 0.02609832193906726, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.104, "pct_cuda_time": 0.07046546923548161, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.002899813548785251, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, 
None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.543, "cuda_time_us": 109.694, "pct_cuda_time": 0.24850949017222604, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 108.958, "pct_cuda_time": 0.24684209738167454, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.644, "cuda_time_us": 20.096, "pct_cuda_time": 0.04552707271592844, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.096, "pct_cuda_time": 0.04552707271592844, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 507.08, "cuda_time_us": 997.458, "pct_cuda_time": 2.259720486518937, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.441, "cuda_time_us": 619.608, "pct_cuda_time": 1.4037091197935403, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 618.872, "pct_cuda_time": 1.402041727002989, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 108.729, "cuda_time_us": 88.542, "pct_cuda_time": 0.20059007127854978, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.542, "pct_cuda_time": 0.20059007127854978, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 178.289, "cuda_time_us": 289.308, "pct_cuda_time": 0.6554212954468464, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 289.308, "pct_cuda_time": 0.6554212954468464, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2479.955, "cuda_time_us": 1361.676, "pct_cuda_time": 3.0848488389497697, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.466, "cuda_time_us": 19.935, "pct_cuda_time": 0.045162330542995295, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.935, "pct_cuda_time": 0.045162330542995295, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1781.938, "cuda_time_us": 321.626, "pct_cuda_time": 0.728637056595004, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.169, "cuda_time_us": 142.62199999999999, "pct_cuda_time": 0.32310719371472657, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 141.886, "pct_cuda_time": 0.32143980092417507, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 527.274, "cuda_time_us": 24.703, "pct_cuda_time": 0.05596413601222036, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.703, "pct_cuda_time": 0.05596413601222036, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 750.181, "cuda_time_us": 43.742, "pct_cuda_time": 0.09909659707106598, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.007, "pct_cuda_time": 0.02493613104021817, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 
8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.455, "pct_cuda_time": 0.07126065248206255, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.002899813548785251, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 189.325, "cuda_time_us": 110.559, "pct_cuda_time": 0.2504691297969911, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.823, "pct_cuda_time": 0.24880173700643954, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.227, "cuda_time_us": 20.895, "pct_cuda_time": 0.04733719070458423, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, 
void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.895, "pct_cuda_time": 0.04733719070458423, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 474.471, "cuda_time_us": 999.22, "pct_cuda_time": 2.2637122611071865, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 175.316, "cuda_time_us": 620.28, "pct_cuda_time": 1.4052315219066527, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 619.544, "pct_cuda_time": 1.4035641291161012, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.565, "cuda_time_us": 88.831, "pct_cuda_time": 0.20124479480636143, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.831, "pct_cuda_time": 0.20124479480636143, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.343, "cuda_time_us": 290.109, "pct_cuda_time": 0.6572359443941722, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 290.109, "pct_cuda_time": 0.6572359443941722, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2753.107, "cuda_time_us": 1357.4879999999998, "pct_cuda_time": 3.075361011494838, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.191, "cuda_time_us": 19.488, "pct_cuda_time": 0.044149661280255444, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.488, "pct_cuda_time": 0.044149661280255444, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2057.859, "cuda_time_us": 322.556, "pct_cuda_time": 0.7307439523765433, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.8, "cuda_time_us": 143.262, "pct_cuda_time": 0.32455710048911923, "trace": "" }, 
"children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.526, "pct_cuda_time": 0.32288970769856773, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 481.428, "cuda_time_us": 25.056, "pct_cuda_time": 0.056763850217471296, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 25.056, "pct_cuda_time": 0.056763850217471296, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1078.605, "cuda_time_us": 43.999, "pct_cuda_time": 0.09967882526015802, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.136, "pct_cuda_time": 0.025228377874431683, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.391, "pct_cuda_time": 0.0711156618046233, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0033347855811030384, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 201.634, "cuda_time_us": 110.23899999999999, "pct_cuda_time": 0.2497441764097947, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.704, "pct_cuda_time": 0.001594897451831888, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.535, "pct_cuda_time": 0.24814927895796288, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.238, "cuda_time_us": 20.384, "pct_cuda_time": 0.046179530764405126, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.384, "pct_cuda_time": 0.046179530764405126, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 470.611, "cuda_time_us": 995.06, "pct_cuda_time": 2.2542878670736344, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.781, "cuda_time_us": 618.3919999999999, "pct_cuda_time": 1.4009542969221944, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 617.656, "pct_cuda_time": 1.399286904131643, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { 
"entry": { "name": "SiluAndMul", "cpu_time_us": 98.587, "cuda_time_us": 87.935, "pct_cuda_time": 0.19921492532221177, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 87.935, "pct_cuda_time": 0.19921492532221177, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.409, "cuda_time_us": 288.733, "pct_cuda_time": 0.6541186448292281, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.733, "pct_cuda_time": 0.6541186448292281, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2420.82, "cuda_time_us": 1359.405, "pct_cuda_time": 3.079703935380011, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.908, "cuda_time_us": 19.615, "pct_cuda_time": 0.044437377155798985, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.615, "pct_cuda_time": 0.044437377155798985, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1731.877, "cuda_time_us": 322.044, "pct_cuda_time": 0.7295840269570292, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 149.799, "cuda_time_us": 143.902, "pct_cuda_time": 0.32600700726351184, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 143.166, "pct_cuda_time": 0.3243396144729604, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 500.742, "cuda_time_us": 24.608, "pct_cuda_time": 0.055748915475396454, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.608, "pct_cuda_time": 0.055748915475396454, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 726.25, "cuda_time_us": 43.775, "pct_cuda_time": 0.0991713578891206, "trace": 
"" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.296, "pct_cuda_time": 0.025590854568029838, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.007, "pct_cuda_time": 0.07024571773998772, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0033347855811030384, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 202.37, "cuda_time_us": 109.759, "pct_cuda_time": 0.2486567463290003, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.704, "pct_cuda_time": 0.001594897451831888, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.055, "pct_cuda_time": 0.24706184887716842, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.083, "cuda_time_us": 19.743, "pct_cuda_time": 0.04472735851067751, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.743, "pct_cuda_time": 0.04472735851067751, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 467.256, "cuda_time_us": 998.0029999999999, "pct_cuda_time": 2.260955172756505, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 166.294, "cuda_time_us": 619.896, "pct_cuda_time": 1.404361577842017, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 619.16, "pct_cuda_time": 1.4026941850514656, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.337, "cuda_time_us": 88.703, "pct_cuda_time": 0.20095481345148292, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.703, "pct_cuda_time": 0.20095481345148292, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.411, "cuda_time_us": 289.404, "pct_cuda_time": 0.6556387814630052, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 289.404, "pct_cuda_time": 0.6556387814630052, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2559.217, "cuda_time_us": 1360.719, "pct_cuda_time": 3.082680775226186, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 125.481, "cuda_time_us": 19.808, "pct_cuda_time": 0.04487461466745176, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, 
void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.808, "pct_cuda_time": 0.04487461466745176, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1817.691, "cuda_time_us": 322.526, "pct_cuda_time": 0.7306759879964937, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 183.678, "cuda_time_us": 143.2, "pct_cuda_time": 0.32441664077035, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0016696582698865078, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.463, "pct_cuda_time": 0.32274698250046346, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 535.24, "cuda_time_us": 24.735, "pct_cuda_time": 0.05603663135093998, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.735, "pct_cuda_time": 0.05603663135093998, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 753.164, "cuda_time_us": 44.448, "pct_cuda_time": 0.10069602548156784, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.744, "pct_cuda_time": 0.026605789310104676, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, 
cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.424, "pct_cuda_time": 0.07119042262267791, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.002899813548785251, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 184.611, "cuda_time_us": 110.143, "pct_cuda_time": 0.24952669039363587, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.407, "pct_cuda_time": 0.24785929760308434, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.158, "cuda_time_us": 19.968, "pct_cuda_time": 0.04523709136104992, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.968, "pct_cuda_time": 0.04523709136104992, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 457.118, "cuda_time_us": 998.4169999999999, "pct_cuda_time": 2.2618930812011904, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.944, "cuda_time_us": 620.7909999999999, "pct_cuda_time": 1.4063891818468317, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": 
"mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 620.055, "pct_cuda_time": 1.4047217890562802, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.507, "cuda_time_us": 88.606, "pct_cuda_time": 0.200735061955989, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.606, "pct_cuda_time": 0.200735061955989, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.44, "cuda_time_us": 289.02, "pct_cuda_time": 0.6547688373983697, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 289.02, "pct_cuda_time": 0.6547688373983697, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2416.636, "cuda_time_us": 1361.871, "pct_cuda_time": 3.085290607420093, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.189, "cuda_time_us": 20.095, "pct_cuda_time": 0.045524807236593454, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.095, "pct_cuda_time": 0.045524807236593454, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1719.76, "cuda_time_us": 323.35699999999997, "pct_cuda_time": 0.732558601323869, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 150.8, "cuda_time_us": 143.646, "pct_cuda_time": 0.3254270445537548, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.878, "pct_cuda_time": 0.32368715642448365, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 
498.854, "cuda_time_us": 25.088, "pct_cuda_time": 0.05683634555619093, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 25.088, "pct_cuda_time": 0.05683634555619093, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 741.447, "cuda_time_us": 44.064, "pct_cuda_time": 0.09982608141693226, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.136, "pct_cuda_time": 0.025228377874431683, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.648, "pct_cuda_time": 0.07169788999371533, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.002899813548785251, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 
128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 188.207, "cuda_time_us": 110.559, "pct_cuda_time": 0.2504691297969911, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.823, "pct_cuda_time": 0.24880173700643954, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 91.846, "cuda_time_us": 21.183, "pct_cuda_time": 0.047989648753060915, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 21.183, "pct_cuda_time": 0.047989648753060915, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 461.411, "cuda_time_us": 997.236, "pct_cuda_time": 2.259217550106569, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.511, "cuda_time_us": 619.96, "pct_cuda_time": 1.4045065685194564, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 619.192, "pct_cuda_time": 1.4027666803901853, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.279, "cuda_time_us": 88.415, "pct_cuda_time": 0.20030235540300623, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.415, "pct_cuda_time": 0.20030235540300623, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.929, "cuda_time_us": 288.861, "pct_cuda_time": 0.6544086261841066, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.861, "pct_cuda_time": 0.6544086261841066, "trace": 
"mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2535.321, "cuda_time_us": 1361.355, "pct_cuda_time": 3.0841216200832386, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.786, "cuda_time_us": 19.455, "pct_cuda_time": 0.04407490046220082, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.455, "pct_cuda_time": 0.04407490046220082, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1827.475, "cuda_time_us": 322.426, "pct_cuda_time": 0.7304494400629947, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 164.829, "cuda_time_us": 143.07, "pct_cuda_time": 0.32412212845680144, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.334, "pct_cuda_time": 0.32245473566624994, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 528.314, "cuda_time_us": 24.928, "pct_cuda_time": 0.05647386886259277, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.928, "pct_cuda_time": 0.05647386886259277, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 775.997, "cuda_time_us": 44.35, "pct_cuda_time": 0.10047400850673899, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.135, "pct_cuda_time": 0.025226112395096693, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, 
cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.743, "pct_cuda_time": 0.07191311053053924, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0033347855811030384, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 193.687, "cuda_time_us": 110.078, "pct_cuda_time": 0.24937943423686162, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.31, "pct_cuda_time": 0.24763954610759045, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.138, "cuda_time_us": 20.384, "pct_cuda_time": 0.046179530764405126, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.384, "pct_cuda_time": 0.046179530764405126, 
"trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 468.529, "cuda_time_us": 999.09, "pct_cuda_time": 2.263417748793638, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.276, "cuda_time_us": 621.495, "pct_cuda_time": 1.4079840792986638, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 620.759, "pct_cuda_time": 1.4063166865081123, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.276, "cuda_time_us": 88.639, "pct_cuda_time": 0.20080982277404366, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.639, "pct_cuda_time": 0.20080982277404366, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.188, "cuda_time_us": 288.956, "pct_cuda_time": 0.6546238467209305, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.956, "pct_cuda_time": 0.6546238467209305, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2423.44, "cuda_time_us": 1365.39, "pct_cuda_time": 3.0932628291999174, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.427, "cuda_time_us": 19.52, "pct_cuda_time": 0.04422215661897508, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.52, "pct_cuda_time": 0.04422215661897508, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1740.453, "cuda_time_us": 323.41999999999996, "pct_cuda_time": 0.7327013265219733, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 179.304, "cuda_time_us": 144.12599999999998, "pct_cuda_time": 0.32651447463454925, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- 
matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 143.39, "pct_cuda_time": 0.32484708184399774, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 491.719, "cuda_time_us": 24.832, "pct_cuda_time": 0.056256382846433875, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.832, "pct_cuda_time": 0.056256382846433875, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 732.875, "cuda_time_us": 44.16, "pct_cuda_time": 0.10004356743309115, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.232, "pct_cuda_time": 0.025445863890590576, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 31.488, "pct_cuda_time": 0.07133541330011717, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, 
at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.0032622902423834073, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.99, "cuda_time_us": 110.302, "pct_cuda_time": 0.24988690160789906, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.534, "pct_cuda_time": 0.2481470134786279, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.2, "cuda_time_us": 20.512, "pct_cuda_time": 0.04646951211928365, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.512, "pct_cuda_time": 0.04646951211928365, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 458.467, "cuda_time_us": 1001.9380000000001, "pct_cuda_time": 2.269869833939685, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.561, "cuda_time_us": 623.832, "pct_cuda_time": 1.4132785045045317, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 623.096, "pct_cuda_time": 1.4116111117139805, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.081, "cuda_time_us": 88.734, "pct_cuda_time": 0.20102504331086757, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, 
c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.734, "pct_cuda_time": 0.20102504331086757, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.234, "cuda_time_us": 289.372, "pct_cuda_time": 0.6555662861242857, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 289.372, "pct_cuda_time": 0.6555662861242857, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2487.229, "cuda_time_us": 1358.225, "pct_cuda_time": 3.0770306697647247, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.502, "cuda_time_us": 19.84, "pct_cuda_time": 0.04494711000617139, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.84, "pct_cuda_time": 0.04494711000617139, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1761.842, "cuda_time_us": 321.981, "pct_cuda_time": 0.729441301758925, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 148.279, "cuda_time_us": 143.358, "pct_cuda_time": 0.32477458650527813, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.59, "pct_cuda_time": 0.323034698376007, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 485.613, "cuda_time_us": 25.184, "pct_cuda_time": 0.05705383157234982, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 25.184, "pct_cuda_time": 0.05705383157234982, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 745.89, "cuda_time_us": 43.424, "pct_cuda_time": 0.09837617464253964, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, 
int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.168, "pct_cuda_time": 0.02530087321315131, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 30.976, "pct_cuda_time": 0.07017548788060307, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.002899813548785251, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 233.549, "cuda_time_us": 110.015, "pct_cuda_time": 0.24923670903875733, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.279, "pct_cuda_time": 0.24756931624820583, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 
4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.649, "cuda_time_us": 19.584, "pct_cuda_time": 0.04436714729641434, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.584, "pct_cuda_time": 0.04436714729641434, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 483.218, "cuda_time_us": 996.8199999999999, "pct_cuda_time": 2.258275110703214, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 173.85, "cuda_time_us": 619.8969999999999, "pct_cuda_time": 1.404363843321352, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0016696582698865078, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 619.16, "pct_cuda_time": 1.4026941850514656, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.092, "cuda_time_us": 88.639, "pct_cuda_time": 0.20080982277404366, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.639, "pct_cuda_time": 0.20080982277404366, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 147.996, "cuda_time_us": 288.284, "pct_cuda_time": 0.6531014446078182, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 288.284, "pct_cuda_time": 0.6531014446078182, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2674.449, "cuda_time_us": 1362.5439999999999, "pct_cuda_time": 3.0868152750125395, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.663, "cuda_time_us": 19.808, "pct_cuda_time": 0.04487461466745176, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.808, "pct_cuda_time": 0.04487461466745176, "trace": 
"_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1962.538, "cuda_time_us": 321.82, "pct_cuda_time": 0.7290765595859918, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 157.595, "cuda_time_us": 143.006, "pct_cuda_time": 0.3239771377793622, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 142.27, "pct_cuda_time": 0.3223097449888107, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 484.891, "cuda_time_us": 24.864, "pct_cuda_time": 0.0563288781851535, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 24.864, "pct_cuda_time": 0.0563288781851535, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 945.897, "cuda_time_us": 44.191, "pct_cuda_time": 0.10011379729247581, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.488, "pct_cuda_time": 0.026025826600347628, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > 
>::Params)", "cpu_time_us": 0, "cuda_time_us": 31.359, "pct_cuda_time": 0.07104316646590367, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.003044804226224514, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 204.866, "cuda_time_us": 109.759, "pct_cuda_time": 0.2486567463290003, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 109.023, "pct_cuda_time": 0.24698935353844875, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.31, "cuda_time_us": 20.128, "pct_cuda_time": 0.04559956805464807, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.128, "pct_cuda_time": 0.04559956805464807, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 475.656, "cuda_time_us": 1000.788, "pct_cuda_time": 2.2672645327044485, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 169.6, "cuda_time_us": 620.505, "pct_cuda_time": 1.4057412547570252, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0016696582698865078, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 619.768, "pct_cuda_time": 1.4040715964871389, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.215, "cuda_time_us": 88.831, "pct_cuda_time": 0.20124479480636143, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.831, "pct_cuda_time": 0.20124479480636143, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.897, "cuda_time_us": 291.452, "pct_cuda_time": 0.6602784831410617, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 291.452, "pct_cuda_time": 0.6602784831410617, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2570.862, "cuda_time_us": 1376.6859999999997, "pct_cuda_time": 3.1188536837679464, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.295, "cuda_time_us": 19.552, "pct_cuda_time": 0.04429465195769471, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.552, "pct_cuda_time": 0.04429465195769471, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1825.94, "cuda_time_us": 329.787, "pct_cuda_time": 0.7471256334478449, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.26, "cuda_time_us": 146.10999999999999, "pct_cuda_time": 0.3310091856351664, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 145.374, "pct_cuda_time": 0.3293417928446149, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 533.149, "cuda_time_us": 25.568, "pct_cuda_time": 0.05792377563698539, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, 
c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 25.568, "pct_cuda_time": 0.05792377563698539, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 774.426, "cuda_time_us": 45.342999999999996, "pct_cuda_time": 0.10272362948638251, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.263, "pct_cuda_time": 0.025516093749975217, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 32.576, "pct_cuda_time": 0.07380025481658464, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.00340728091982267, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 
210.97, "cuda_time_us": 112.766, "pct_cuda_time": 0.25546904268931064, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 112.03, "pct_cuda_time": 0.25380164989875914, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 106.179, "cuda_time_us": 19.936, "pct_cuda_time": 0.045164596022330285, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.936, "pct_cuda_time": 0.045164596022330285, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 486.236, "cuda_time_us": 1007.4109999999998, "pct_cuda_time": 2.282268802340077, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 174.207, "cuda_time_us": 625.4639999999999, "pct_cuda_time": 1.416975766779233, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 624.728, "pct_cuda_time": 1.4153083739886814, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.126, "cuda_time_us": 89.151, "pct_cuda_time": 0.20196974819355773, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 89.151, "pct_cuda_time": 0.20196974819355773, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 151.088, "cuda_time_us": 292.796, "pct_cuda_time": 0.6633232873672863, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 292.796, "pct_cuda_time": 0.6633232873672863, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], 
None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2455.391, "cuda_time_us": 1375.5990000000002, "pct_cuda_time": 3.1163911077308146, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.054, "cuda_time_us": 19.84, "pct_cuda_time": 0.04494711000617139, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.84, "pct_cuda_time": 0.04494711000617139, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1733.695, "cuda_time_us": 328.7, "pct_cuda_time": 0.7446630574107125, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.381, "cuda_time_us": 145.34199999999998, "pct_cuda_time": 0.3292692975058953, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 144.606, "pct_cuda_time": 0.3276019047153438, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 497.801, "cuda_time_us": 26.016, "pct_cuda_time": 0.05893871037906022, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 26.016, "pct_cuda_time": 0.05893871037906022, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 724.248, "cuda_time_us": 45.151, "pct_cuda_time": 0.10228865745406475, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.52, "pct_cuda_time": 0.02609832193906726, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, 
cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 32.127, "pct_cuda_time": 0.07278305459517481, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.00340728091982267, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 189.52, "cuda_time_us": 112.191, "pct_cuda_time": 0.2541663920716923, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 111.455, "pct_cuda_time": 0.2524989992811407, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 101.435, "cuda_time_us": 20.192, "pct_cuda_time": 0.04574455873208734, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.192, "pct_cuda_time": 0.04574455873208734, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 
465.38, "cuda_time_us": 1006.8670000000001, "pct_cuda_time": 2.2810363815818433, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.43, "cuda_time_us": 625.144, "pct_cuda_time": 1.4162508133920366, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 624.376, "pct_cuda_time": 1.4145109252627657, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.616, "cuda_time_us": 89.055, "pct_cuda_time": 0.20175226217739886, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 89.055, "pct_cuda_time": 0.20175226217739886, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.803, "cuda_time_us": 292.668, "pct_cuda_time": 0.6630333060124077, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 292.668, "pct_cuda_time": 0.6630333060124077, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2633.442, "cuda_time_us": 1372.495, "pct_cuda_time": 3.1093590598750103, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.847, "cuda_time_us": 19.872, "pct_cuda_time": 0.04501960534489102, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.872, "pct_cuda_time": 0.04501960534489102, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1897.216, "cuda_time_us": 327.86699999999996, "pct_cuda_time": 0.7427759131246671, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.744, "cuda_time_us": 145.40599999999998, "pct_cuda_time": 0.3294142881833345, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 144.67, "pct_cuda_time": 0.327746895392783, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 498.242, "cuda_time_us": 25.184, "pct_cuda_time": 0.05705383157234982, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 25.184, "pct_cuda_time": 0.05705383157234982, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 867.691, "cuda_time_us": 45.119, "pct_cuda_time": 0.10221616211534511, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.456, "pct_cuda_time": 0.025953331261627997, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 32.383, "pct_cuda_time": 0.07336301730493186, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 
0.002899813548785251, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 216.504, "cuda_time_us": 112.158, "pct_cuda_time": 0.25409163125363765, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 111.422, "pct_cuda_time": 0.25242423846308615, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.417, "cuda_time_us": 19.776, "pct_cuda_time": 0.04480211932873213, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.776, "pct_cuda_time": 0.04480211932873213, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 498.215, "cuda_time_us": 1004.98, "pct_cuda_time": 2.2767614220767203, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.459, "cuda_time_us": 622.809, "pct_cuda_time": 1.4109609191448385, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0016696582698865078, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 622.072, "pct_cuda_time": 1.4092912608749522, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.339, "cuda_time_us": 89.375, "pct_cuda_time": 0.20247721556459516, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 89.375, "pct_cuda_time": 0.20247721556459516, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], 
bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 148.875, "cuda_time_us": 292.796, "pct_cuda_time": 0.6633232873672863, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 292.796, "pct_cuda_time": 0.6633232873672863, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2498.131, "cuda_time_us": 1373.359, "pct_cuda_time": 3.11131643402044, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.61, "cuda_time_us": 19.616, "pct_cuda_time": 0.04443964263513397, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.616, "pct_cuda_time": 0.04443964263513397, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1799.302, "cuda_time_us": 328.38100000000003, "pct_cuda_time": 0.7439403695028513, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.984, "cuda_time_us": 145.534, "pct_cuda_time": 0.329704269538213, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 144.798, "pct_cuda_time": 0.3280368767476616, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 554.955, "cuda_time_us": 25.312, "pct_cuda_time": 0.057343812927228344, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 25.312, "pct_cuda_time": 0.057343812927228344, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 727.387, "cuda_time_us": 44.929, "pct_cuda_time": 0.10178572104169732, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.552, "pct_cuda_time": 0.026170817277786893, "trace": 
"_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 32.064, "pct_cuda_time": 0.07264032939707055, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.313, "pct_cuda_time": 0.0029745743668398708, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 193.006, "cuda_time_us": 112.60600000000001, "pct_cuda_time": 0.25510656599571246, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 111.87, "pct_cuda_time": 0.25343917320516096, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" 
}, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.501, "cuda_time_us": 20.991, "pct_cuda_time": 0.04755467672074313, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.991, "pct_cuda_time": 0.04755467672074313, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 463.629, "cuda_time_us": 1004.371, "pct_cuda_time": 2.275381745161712, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.0, "cuda_time_us": 623.128, "pct_cuda_time": 1.4116836070527, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 622.392, "pct_cuda_time": 1.4100162142621486, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.845, "cuda_time_us": 88.415, "pct_cuda_time": 0.20030235540300623, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.415, "pct_cuda_time": 0.20030235540300623, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.16, "cuda_time_us": 292.828, "pct_cuda_time": 0.6633957827060057, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 292.828, "pct_cuda_time": 0.6633957827060057, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2456.729, "cuda_time_us": 1373.547, "pct_cuda_time": 3.1117423441354184, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.23, "cuda_time_us": 19.871, "pct_cuda_time": 0.04501733986555603, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.871, "pct_cuda_time": 0.04501733986555603, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1719.965, 
"cuda_time_us": 328.379, "pct_cuda_time": 0.7439358385441813, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 151.432, "cuda_time_us": 145.693, "pct_cuda_time": 0.33006448075247624, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 144.925, "pct_cuda_time": 0.3283245926232051, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[2048, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 512.7, "cuda_time_us": 25.376, "pct_cuda_time": 0.0574888036046676, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 25.376, "pct_cuda_time": 0.0574888036046676, "trace": "_C::rotary_embedding(int64[2048], bfloat16[2048, 4096], bfloat16[2048, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 718.228, "cuda_time_us": 45.151999999999994, "pct_cuda_time": 0.10229092293339971, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 11.456, "pct_cuda_time": 0.025953331261627997, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[2048], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 32.192, "pct_cuda_time": 0.07293031075194907, "trace": "_vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 
128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.00340728091982267, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], None, None, bfloat16[2048, 32, 128], int32[9], int32[9], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[2048, 32, 128], bfloat16[2048, 8, 128], bfloat16[2048, 8, 128], bfloat16[2048, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 188.239, "cuda_time_us": 112.158, "pct_cuda_time": 0.25409163125363765, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 111.422, "pct_cuda_time": 0.25242423846308615, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[2048, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 105.228, "cuda_time_us": 20.319, "pct_cuda_time": 0.04603227460763087, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.319, "pct_cuda_time": 0.04603227460763087, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 480.535, "cuda_time_us": 1004.9780000000001, "pct_cuda_time": 2.2767568911180502, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.781, "cuda_time_us": 623.255, "pct_cuda_time": 1.4119713229282433, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.001665127311216531, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 622.52, "pct_cuda_time": 
1.410306195617027, "trace": "mm(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[2048, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[2048, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.485, "cuda_time_us": 88.831, "pct_cuda_time": 0.20124479480636143, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 88.831, "pct_cuda_time": 0.20124479480636143, "trace": "_C::silu_and_mul(bfloat16[2048, 14336], bfloat16[2048, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.785, "cuda_time_us": 292.892, "pct_cuda_time": 0.6635407733834451, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 292.892, "pct_cuda_time": 0.6635407733834451, "trace": "mm(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[2048, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[2048, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.303, "cuda_time_us": 19.392, "pct_cuda_time": 0.043932175264096554, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.392, "pct_cuda_time": 0.043932175264096554, "trace": "_C::fused_add_rms_norm(bfloat16[2048, 4096], bfloat16[2048, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 499.939, "cuda_time_us": 358.843, "pct_cuda_time": 0.8129514010052703, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 5.792, "pct_cuda_time": 0.013121656308253262, "trace": "index_select(bfloat16[2048, 4096], 0, int64[8])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[8, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 352.315, "pct_cuda_time": 0.7981623519064655, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[8, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 3685.996, "cuda_time_us": 122.36500000000001, "pct_cuda_time": 0.27721537882586506, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0016673927905515192, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, 
None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.001665127311216531, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.001812383467990782, "trace": "copy_(int32[8], int32[8], True) <- _to_copy(int32[8], 3, 0, None, None, True, None) <- to(int32[8], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0017398881292711508, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.001812383467990782, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 4.831, "pct_cuda_time": 0.010944530667329335, "trace": "copy_(float32[8, 128256], bfloat16[8, 128256], False) <- _to_copy(bfloat16[8, 128256], 6, None, None, None, False, None) <- to(bfloat16[8, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 6.4, "pct_cuda_time": 0.014499067743926257, "trace": "div_(float32[8, 128256], bfloat16[8, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 35.488, "pct_cuda_time": 0.08039733064007108, "trace": "_softmax(float32[8, 128256], -1, False) 
<- softmax(float32[8, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 28.511, "pct_cuda_time": 0.06459108131985648, "trace": "_log_softmax(float32[8, 128256], -1, False) <- log_softmax(float32[8, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 1.856, "pct_cuda_time": 0.004204729645738614, "trace": "copy_(int64[8], int32[8], False) <- _to_copy(int32[8], 4, None, None, None, False, None) <- to(int32[8], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 7.616, "pct_cuda_time": 0.017253890615272244, "trace": "index(float32[8, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 29.12, "pct_cuda_time": 0.06597075823486447, "trace": "argmax(float32[8, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.007177038533243497, "trace": "copy_(int64[8], int64[8], False) <- _to_copy(int64[8], 4, 0, None, None, False, None) <- to(int64[8], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] }, "decode_1": { "metadata": { "num_running_seqs": 8 }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", "cuda_time_us": 6432.813999999999, "pct_cuda_time": 93.15568991653541, "invocations": 1 }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 7.104, "pct_cuda_time": 0.10287535457531768, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 7.104, "pct_cuda_time": 0.10287535457531768, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 6422.51, "pct_cuda_time": 
93.00647431215141, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 212.478, "pct_cuda_time": 3.07696362464166, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 4.191, "pct_cuda_time": 0.060691245921333944, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 208.287, "pct_cuda_time": 3.016272378720326, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 1913.2230000000002, "pct_cuda_time": 27.70600992492301, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 681.4039999999999, "pct_cuda_time": 9.867634868952669, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 681.4039999999999, "pct_cuda_time": 9.867634868952669, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 117.04999999999998, "pct_cuda_time": 1.6950394500339152, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 117.04999999999998, "pct_cuda_time": 1.6950394500339152, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 540.244, "pct_cuda_time": 7.823450599266319, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 77.56599999999999, "pct_cuda_time": 1.1232586927067976, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 415.25700000000006, "pct_cuda_time": 6.0134728483787585, "invocations": 32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 47.421, "pct_cuda_time": 
0.686719058180763, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 574.525, "pct_cuda_time": 8.3198850066701, "invocations": 32 }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cuda_time_us": 505.72100000000006, "pct_cuda_time": 7.323511710470755, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cuda_time_us": 68.804, "pct_cuda_time": 0.9963732961993466, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 4296.808999999999, "pct_cuda_time": 62.223500762586724, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 2607.5180000000005, "pct_cuda_time": 37.76032359396442, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 2607.5180000000005, "pct_cuda_time": 37.76032359396442, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 285.18, "pct_cuda_time": 4.129785137639231, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 285.18, "pct_cuda_time": 4.129785137639231, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 1404.1109999999999, "pct_cuda_time": 20.33339203098309, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 1404.1109999999999, "pct_cuda_time": 20.33339203098309, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 3.2, "pct_cuda_time": 0.04634024980870166, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 3.2, "pct_cuda_time": 0.04634024980870166, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 352.187, "pct_cuda_time": 5.100135487305379, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 5.471, "pct_cuda_time": 0.07922734584481463, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 0.736, "pct_cuda_time": 0.010658257456001382, "invocations": 1 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 345.98, "pct_cuda_time": 5.010249884004563, "invocations": 1 }, 
"children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 120.443, "pct_cuda_time": 1.7441745961592043, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 5.406999999999999, "pct_cuda_time": 0.07830054084864058, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 4.639, "pct_cuda_time": 0.06717888089455219, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 6.399, "pct_cuda_time": 0.0926660182893381, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 35.295, "pct_cuda_time": 0.5111184740619141, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 28.575, "pct_cuda_time": 0.41380394946364063, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 1.728, "pct_cuda_time": 0.025023734896698898, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cuda_time_us": 7.584, "pct_cuda_time": 0.10982639204662292, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned 
int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 28.32, "pct_cuda_time": 0.4101112108070097, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 2.496, "pct_cuda_time": 0.0361453948507873, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 81923.723, "cuda_time_us": 6432.813999999999, "pct_cuda_time": 93.15568991653541, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 345.919, "cuda_time_us": 7.104, "pct_cuda_time": 0.10287535457531768, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 7.104, "pct_cuda_time": 0.10287535457531768, "trace": "index_select(bfloat16[128256, 4096], 0, int64[8]) <- embedding(bfloat16[128256, 4096], int64[8], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 5066.369, "cuda_time_us": 208.41400000000002, "pct_cuda_time": 3.018111507384609, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 338.211, "cuda_time_us": 4.191, "pct_cuda_time": 0.060691245921333944, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.191, "pct_cuda_time": 0.060691245921333944, "trace": "_C::rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 3790.83, "cuda_time_us": 66.94500000000001, "pct_cuda_time": 0.969452507326104, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 826.47, "cuda_time_us": 27.008, "pct_cuda_time": 0.391111708385442, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.008, "pct_cuda_time": 0.391111708385442, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 1100.497, "cuda_time_us": 3.456, "pct_cuda_time": 0.050047469793397796, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.456, "pct_cuda_time": 0.050047469793397796, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1182.126, "cuda_time_us": 18.016000000000002, "pct_cuda_time": 0.26089560642299037, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 
2.496, "pct_cuda_time": 0.0361453948507873, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.856, "pct_cuda_time": 0.2006532816716782, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.664, "pct_cuda_time": 0.024096929900524863, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 370.782, "cuda_time_us": 18.465, "pct_cuda_time": 0.26739772272427376, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.128, "pct_cuda_time": 0.23355485903585635, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 
const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.337, "pct_cuda_time": 0.033842863688417435, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 120.338, "cuda_time_us": 3.36, "pct_cuda_time": 0.04865726229913674, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.36, "pct_cuda_time": 0.04865726229913674, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 683.384, "cuda_time_us": 133.918, "pct_cuda_time": 1.9393104918380342, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 281.712, "cuda_time_us": 80.927, "pct_cuda_time": 1.171930436334, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.927, "pct_cuda_time": 1.171930436334, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 152.901, "cuda_time_us": 8.928, "pct_cuda_time": 0.12928929696627764, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.12928929696627764, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 171.395, "cuda_time_us": 44.063, "pct_cuda_time": 0.6380907585377567, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.063, "pct_cuda_time": 0.6380907585377567, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2695.938, "cuda_time_us": 201.149, "pct_cuda_time": 2.9129046589907905, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.25, "cuda_time_us": 3.232, "pct_cuda_time": 0.04680365230678868, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.04680365230678868, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1918.154, "cuda_time_us": 58.527, "pct_cuda_time": 0.8475486876730881, "trace": "" }, 
"children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.246, "cuda_time_us": 20.608, "pct_cuda_time": 0.2984312087680387, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.608, "pct_cuda_time": 0.2984312087680387, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 561.668, "cuda_time_us": 3.648, "pct_cuda_time": 0.052827884781919895, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.052827884781919895, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 822.144, "cuda_time_us": 16.512, "pct_cuda_time": 0.2391156890129006, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034755187356526246, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.672, "pct_cuda_time": 0.18350738924245857, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void 
at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.020853112413915745, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 190.271, "cuda_time_us": 17.759, "pct_cuda_time": 0.257173905110229, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.615, "pct_cuda_time": 0.2261259377383989, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.031047967371830115, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 93.646, "cuda_time_us": 3.488, "pct_cuda_time": 0.05051087229148481, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.488, "pct_cuda_time": 0.05051087229148481, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 507.586, "cuda_time_us": 135.902, "pct_cuda_time": 1.9680414467194287, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 182.435, "cuda_time_us": 83.263, "pct_cuda_time": 1.205758818694352, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.263, "pct_cuda_time": 1.205758818694352, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 106.457, "cuda_time_us": 8.832, "pct_cuda_time": 0.12789908947201659, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", 
"cpu_time_us": 0, "cuda_time_us": 8.832, "pct_cuda_time": 0.12789908947201659, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 156.779, "cuda_time_us": 43.807, "pct_cuda_time": 0.6343835385530605, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.807, "pct_cuda_time": 0.6343835385530605, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2462.361, "cuda_time_us": 201.53300000000002, "pct_cuda_time": 2.918465488967835, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.356, "cuda_time_us": 3.168, "pct_cuda_time": 0.045876847310614643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.045876847310614643, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1724.699, "cuda_time_us": 59.327, "pct_cuda_time": 0.8591337501252636, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.348, "cuda_time_us": 20.608, "pct_cuda_time": 0.2984312087680387, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.608, "pct_cuda_time": 0.2984312087680387, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 506.469, "cuda_time_us": 3.712, "pct_cuda_time": 0.05375468977809393, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05375468977809393, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 719.558, "cuda_time_us": 16.863, "pct_cuda_time": 0.2441986351637925, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.03568199235270028, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 
8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.895, "pct_cuda_time": 0.18673672540100245, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.02177991741008978, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 167.757, "cuda_time_us": 18.144, "pct_cuda_time": 0.2627492164153384, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.032, "pct_cuda_time": 0.23216465154159532, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030584564873743097, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, 
"children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.827, "cuda_time_us": 3.328, "pct_cuda_time": 0.048193859801049725, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.328, "pct_cuda_time": 0.048193859801049725, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 476.373, "cuda_time_us": 135.71, "pct_cuda_time": 1.9652610317309072, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.803, "cuda_time_us": 83.775, "pct_cuda_time": 1.2131732586637443, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.775, "pct_cuda_time": 1.2131732586637443, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.606, "cuda_time_us": 8.8, "pct_cuda_time": 0.12743568697392957, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.8, "pct_cuda_time": 0.12743568697392957, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.352, "cuda_time_us": 43.135, "pct_cuda_time": 0.6246520860932331, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.135, "pct_cuda_time": 0.6246520860932331, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2389.002, "cuda_time_us": 199.934, "pct_cuda_time": 2.8953098453915493, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.273, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1711.34, "cuda_time_us": 58.848, "pct_cuda_time": 0.8521971939820235, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.284, "cuda_time_us": 20.704, "pct_cuda_time": 0.29982141626229974, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.704, "pct_cuda_time": 0.29982141626229974, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 512.284, "cuda_time_us": 3.648, "pct_cuda_time": 0.052827884781919895, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.052827884781919895, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 741.196, "cuda_time_us": 16.8, "pct_cuda_time": 0.24328631149568375, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.03429178485843922, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.992, "pct_cuda_time": 0.18814141422332875, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.020853112413915745, "trace": "fill_(int32[1], 0) <- 
zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 160.855, "cuda_time_us": 17.695999999999998, "pct_cuda_time": 0.25626158144212013, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.584, "pct_cuda_time": 0.2256770165683771, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030584564873743097, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.598, "cuda_time_us": 3.36, "pct_cuda_time": 0.04865726229913674, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.36, "pct_cuda_time": 0.04865726229913674, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 455.338, "cuda_time_us": 134.462, "pct_cuda_time": 1.9471883343055132, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.502, "cuda_time_us": 82.271, "pct_cuda_time": 1.1913933412536544, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.271, "pct_cuda_time": 1.1913933412536544, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.097, "cuda_time_us": 8.639, "pct_cuda_time": 0.12510419315542926, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.639, "pct_cuda_time": 0.12510419315542926, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", 
"cpu_time_us": 140.616, "cuda_time_us": 43.552, "pct_cuda_time": 0.6306907998964295, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.552, "pct_cuda_time": 0.6306907998964295, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2397.333, "cuda_time_us": 200.73399999999998, "pct_cuda_time": 2.9068949078437245, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.358, "cuda_time_us": 3.201, "pct_cuda_time": 0.04635473113676688, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.201, "pct_cuda_time": 0.04635473113676688, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1679.478, "cuda_time_us": 60.223, "pct_cuda_time": 0.8721090200716999, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.169, "cuda_time_us": 21.6, "pct_cuda_time": 0.31279668620873624, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.6, "pct_cuda_time": 0.31279668620873624, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 490.008, "cuda_time_us": 3.712, "pct_cuda_time": 0.05375468977809393, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05375468977809393, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 715.524, "cuda_time_us": 17.247, "pct_cuda_time": 0.2497594651408367, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.495, "pct_cuda_time": 0.03613091352272208, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, 
true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.248, "pct_cuda_time": 0.19184863420802487, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.02177991741008978, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 174.691, "cuda_time_us": 17.664, "pct_cuda_time": 0.25579817894403317, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.552, "pct_cuda_time": 0.22521361407029009, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030584564873743097, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.418, "cuda_time_us": 3.359, "pct_cuda_time": 0.048642780971071525, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.359, "pct_cuda_time": 0.048642780971071525, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 492.246, "cuda_time_us": 133.951, "pct_cuda_time": 1.939788375664186, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 175.455, "cuda_time_us": 81.311, "pct_cuda_time": 1.177491266311044, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.311, "pct_cuda_time": 1.177491266311044, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.461, "cuda_time_us": 8.832, "pct_cuda_time": 0.12789908947201659, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.832, "pct_cuda_time": 0.12789908947201659, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.276, "cuda_time_us": 43.808, "pct_cuda_time": 0.6343980198811258, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.808, "pct_cuda_time": 0.6343980198811258, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2693.124, "cuda_time_us": 200.572, "pct_cuda_time": 2.904548932697159, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.18, "cuda_time_us": 3.2, "pct_cuda_time": 0.04634024980870166, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04634024980870166, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1898.773, "cuda_time_us": 59.836999999999996, "pct_cuda_time": 0.8665192274385253, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.39, "cuda_time_us": 20.896, "pct_cuda_time": 0.30260183125082185, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.896, "pct_cuda_time": 0.30260183125082185, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], 
bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 530.118, "cuda_time_us": 3.648, "pct_cuda_time": 0.052827884781919895, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.052827884781919895, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 879.9, "cuda_time_us": 16.958, "pct_cuda_time": 0.24557436132998833, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.399, "pct_cuda_time": 0.03474070602846102, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.12, "pct_cuda_time": 0.1899950242156768, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.439, "pct_cuda_time": 0.020838631085850528, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, 
None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 187.641, "cuda_time_us": 18.335, "pct_cuda_time": 0.2655151500757953, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.191, "pct_cuda_time": 0.23446718270396516, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.031047967371830115, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.616, "cuda_time_us": 3.456, "pct_cuda_time": 0.050047469793397796, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.456, "pct_cuda_time": 0.050047469793397796, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 504.873, "cuda_time_us": 134.079, "pct_cuda_time": 1.9416419856565343, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 199.153, "cuda_time_us": 80.607, "pct_cuda_time": 1.1672964113531297, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.607, "pct_cuda_time": 1.1672964113531297, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.896, "cuda_time_us": 9.153, "pct_cuda_time": 0.13254759578095196, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.153, "pct_cuda_time": 0.13254759578095196, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.615, "cuda_time_us": 44.319, "pct_cuda_time": 0.6417979785224528, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.319, 
"pct_cuda_time": 0.6417979785224528, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2354.497, "cuda_time_us": 200.317, "pct_cuda_time": 2.900856194040528, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.402, "cuda_time_us": 3.168, "pct_cuda_time": 0.045876847310614643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.045876847310614643, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1665.258, "cuda_time_us": 60.095, "pct_cuda_time": 0.8702554100793519, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.658, "cuda_time_us": 21.056, "pct_cuda_time": 0.30491884374125694, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.056, "pct_cuda_time": 0.30491884374125694, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 497.486, "cuda_time_us": 3.68, "pct_cuda_time": 0.053291287280006906, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.053291287280006906, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 711.293, "cuda_time_us": 16.895, "pct_cuda_time": 0.24466203766187952, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034755187356526246, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, 
false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.023, "pct_cuda_time": 0.18859033539335052, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.021316514912002763, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 156.884, "cuda_time_us": 18.464, "pct_cuda_time": 0.26738324139620856, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.319, "pct_cuda_time": 0.2363207926963132, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.145, "pct_cuda_time": 0.031062448699895332, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.973, "cuda_time_us": 3.328, "pct_cuda_time": 0.048193859801049725, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.328, "pct_cuda_time": 0.048193859801049725, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], 
bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 459.145, "cuda_time_us": 133.726, "pct_cuda_time": 1.936530076849512, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.322, "cuda_time_us": 81.438, "pct_cuda_time": 1.1793303949753269, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.438, "pct_cuda_time": 1.1793303949753269, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.483, "cuda_time_us": 8.672, "pct_cuda_time": 0.1255820769815815, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.672, "pct_cuda_time": 0.1255820769815815, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.937, "cuda_time_us": 43.616, "pct_cuda_time": 0.6316176048926037, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.616, "pct_cuda_time": 0.6316176048926037, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2387.566, "cuda_time_us": 198.907, "pct_cuda_time": 2.8804375214685694, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.047, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1663.341, "cuda_time_us": 58.783, "pct_cuda_time": 0.8512559076577842, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 133.649, "cuda_time_us": 20.768, "pct_cuda_time": 0.3007482212584738, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.768, "pct_cuda_time": 0.3007482212584738, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 463.939, "cuda_time_us": 3.615, "pct_cuda_time": 0.05235000095576766, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, 
c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.615, "pct_cuda_time": 0.05235000095576766, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 729.335, "cuda_time_us": 16.736, "pct_cuda_time": 0.24235950649950966, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.03429178485843922, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.928, "pct_cuda_time": 0.18721460922715472, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.020853112413915745, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 185.397, 
"cuda_time_us": 17.664, "pct_cuda_time": 0.25579817894403317, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.52, "pct_cuda_time": 0.224750211572203, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.031047967371830115, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.685, "cuda_time_us": 3.359, "pct_cuda_time": 0.048642780971071525, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.359, "pct_cuda_time": 0.048642780971071525, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 498.786, "cuda_time_us": 133.501, "pct_cuda_time": 1.9332717780348376, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 173.479, "cuda_time_us": 80.574, "pct_cuda_time": 1.1668185275269773, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.574, "pct_cuda_time": 1.1668185275269773, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.875, "cuda_time_us": 8.8, "pct_cuda_time": 0.12743568697392957, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.8, "pct_cuda_time": 0.12743568697392957, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 163.444, "cuda_time_us": 44.127, "pct_cuda_time": 0.6390175635339307, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.127, "pct_cuda_time": 0.6390175635339307, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2335.26, "cuda_time_us": 
200.85999999999999, "pct_cuda_time": 2.908719555179942, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.112, "cuda_time_us": 3.488, "pct_cuda_time": 0.05051087229148481, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.488, "pct_cuda_time": 0.05051087229148481, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1654.244, "cuda_time_us": 60.254999999999995, "pct_cuda_time": 0.872572422569787, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.446, "cuda_time_us": 21.28, "pct_cuda_time": 0.30816266122786606, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.28, "pct_cuda_time": 0.30816266122786606, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 513.126, "cuda_time_us": 3.711, "pct_cuda_time": 0.053740208450028706, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.711, "pct_cuda_time": 0.053740208450028706, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 695.648, "cuda_time_us": 17.056, "pct_cuda_time": 0.24699353148037984, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.592, "pct_cuda_time": 0.037535602345048345, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, 
cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.96, "pct_cuda_time": 0.18767801172524173, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.02177991741008978, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 154.928, "cuda_time_us": 18.208, "pct_cuda_time": 0.2636760214115124, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.096, "pct_cuda_time": 0.23309145653776933, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030584564873743097, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.503, "cuda_time_us": 3.263, "pct_cuda_time": 0.04725257347681047, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.263, "pct_cuda_time": 0.04725257347681047, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 454.828, "cuda_time_us": 133.85399999999998, "pct_cuda_time": 1.9383836868418598, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.168, 
"cuda_time_us": 80.799, "pct_cuda_time": 1.1700768263416519, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.799, "pct_cuda_time": 1.1700768263416519, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.931, "cuda_time_us": 8.671, "pct_cuda_time": 0.12556759565351627, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.671, "pct_cuda_time": 0.12556759565351627, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.456, "cuda_time_us": 44.384, "pct_cuda_time": 0.6427392648466921, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.384, "pct_cuda_time": 0.6427392648466921, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2459.16, "cuda_time_us": 199.77499999999998, "pct_cuda_time": 2.893007314229179, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.661, "cuda_time_us": 3.36, "pct_cuda_time": 0.04865726229913674, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.36, "pct_cuda_time": 0.04865726229913674, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1739.34, "cuda_time_us": 58.848000000000006, "pct_cuda_time": 0.8521971939820235, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 245.81, "cuda_time_us": 20.608, "pct_cuda_time": 0.2984312087680387, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.608, "pct_cuda_time": 0.2984312087680387, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 473.118, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, 
"children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 702.222, "cuda_time_us": 16.896, "pct_cuda_time": 0.24467651898994477, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.03429178485843922, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.056, "pct_cuda_time": 0.18906821921950276, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.021316514912002763, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 159.167, "cuda_time_us": 17.728, "pct_cuda_time": 0.2567249839402072, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.584, "pct_cuda_time": 0.2256770165683771, 
"trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.031047967371830115, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.914, "cuda_time_us": 3.552, "pct_cuda_time": 0.05143767728765884, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.552, "pct_cuda_time": 0.05143767728765884, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 472.257, "cuda_time_us": 134.015, "pct_cuda_time": 1.94071518066036, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.909, "cuda_time_us": 80.863, "pct_cuda_time": 1.1710036313378258, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.863, "pct_cuda_time": 1.1710036313378258, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.637, "cuda_time_us": 9.024, "pct_cuda_time": 0.13067950446053866, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.024, "pct_cuda_time": 0.13067950446053866, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.582, "cuda_time_us": 44.128, "pct_cuda_time": 0.6390320448619958, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.128, "pct_cuda_time": 0.6390320448619958, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2420.078, "cuda_time_us": 201.37400000000002, "pct_cuda_time": 2.9161629578054655, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.179, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1717.778, "cuda_time_us": 59.935, "pct_cuda_time": 0.8679383975889169, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.214, "cuda_time_us": 21.536, "pct_cuda_time": 0.3118698812125622, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.536, "pct_cuda_time": 0.3118698812125622, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 505.98, "cuda_time_us": 3.68, "pct_cuda_time": 0.053291287280006906, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.053291287280006906, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 720.044, "cuda_time_us": 16.832, "pct_cuda_time": 0.24374971399377077, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034755187356526246, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.96, "pct_cuda_time": 0.18767801172524173, "trace": 
"_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.021316514912002763, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 158.207, "cuda_time_us": 17.887, "pct_cuda_time": 0.2590275151025771, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.776, "pct_cuda_time": 0.22845743155689915, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.111, "pct_cuda_time": 0.03057008354567788, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.231, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 481.226, "cuda_time_us": 134.847, "pct_cuda_time": 1.9527636456106228, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.871, "cuda_time_us": 81.631, "pct_cuda_time": 1.182125291291914, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.631, "pct_cuda_time": 
1.182125291291914, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.921, "cuda_time_us": 9.28, "pct_cuda_time": 0.1343867244452348, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.28, "pct_cuda_time": 0.1343867244452348, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.545, "cuda_time_us": 43.936, "pct_cuda_time": 0.6362516298734738, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.936, "pct_cuda_time": 0.6362516298734738, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2416.663, "cuda_time_us": 199.23000000000002, "pct_cuda_time": 2.8851149904336353, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.334, "cuda_time_us": 3.231, "pct_cuda_time": 0.046789170978723454, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.231, "pct_cuda_time": 0.046789170978723454, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1756.371, "cuda_time_us": 58.944, "pct_cuda_time": 0.8535874014762846, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.244, "cuda_time_us": 20.64, "pct_cuda_time": 0.2988946112661257, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.64, "pct_cuda_time": 0.2988946112661257, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 472.803, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 720.395, "cuda_time_us": 16.896, "pct_cuda_time": 0.24467651898994477, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, 
(vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.03429178485843922, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.088, "pct_cuda_time": 0.18953162171758978, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.020853112413915745, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 156.023, "cuda_time_us": 17.792, "pct_cuda_time": 0.25765178893638124, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.68, "pct_cuda_time": 0.22706722406263813, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, 
__nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030584564873743097, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.807, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 446.503, "cuda_time_us": 133.791, "pct_cuda_time": 1.937471363173751, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.881, "cuda_time_us": 81.183, "pct_cuda_time": 1.1756376563186959, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.183, "pct_cuda_time": 1.1756376563186959, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.178, "cuda_time_us": 8.928, "pct_cuda_time": 0.12928929696627764, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.12928929696627764, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.548, "cuda_time_us": 43.68, "pct_cuda_time": 0.6325444098887777, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.68, "pct_cuda_time": 0.6325444098887777, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2709.323, "cuda_time_us": 199.57999999999998, "pct_cuda_time": 2.8901834552564614, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.905, "cuda_time_us": 3.328, "pct_cuda_time": 0.048193859801049725, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.328, "pct_cuda_time": 0.048193859801049725, "trace": 
"_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1956.944, "cuda_time_us": 59.389, "pct_cuda_time": 0.8600315924653071, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.447, "cuda_time_us": 21.183, "pct_cuda_time": 0.30675797240553976, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.183, "pct_cuda_time": 0.30675797240553976, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 480.592, "cuda_time_us": 3.68, "pct_cuda_time": 0.053291287280006906, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.053291287280006906, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 998.956, "cuda_time_us": 16.703, "pct_cuda_time": 0.24188162267335744, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034755187356526246, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.831, "pct_cuda_time": 0.18580992040482844, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, 
True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.021316514912002763, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 188.82, "cuda_time_us": 17.823, "pct_cuda_time": 0.258100710106403, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.711, "pct_cuda_time": 0.22751614523265992, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030584564873743097, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 91.944, "cuda_time_us": 3.233, "pct_cuda_time": 0.046818133634853897, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.233, "pct_cuda_time": 0.046818133634853897, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 502.266, "cuda_time_us": 133.63, "pct_cuda_time": 1.9351398693552508, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 185.758, "cuda_time_us": 81.503, "pct_cuda_time": 1.180271681299566, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.503, "pct_cuda_time": 1.180271681299566, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 106.597, 
"cuda_time_us": 9.152, "pct_cuda_time": 0.13253311445288674, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.152, "pct_cuda_time": 0.13253311445288674, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.785, "cuda_time_us": 42.975, "pct_cuda_time": 0.622335073602798, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 42.975, "pct_cuda_time": 0.622335073602798, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2402.072, "cuda_time_us": 200.991, "pct_cuda_time": 2.9106166091564862, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.823, "cuda_time_us": 3.392, "pct_cuda_time": 0.04912066479722376, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.392, "pct_cuda_time": 0.04912066479722376, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1684.1, "cuda_time_us": 59.135000000000005, "pct_cuda_time": 0.8563533351367416, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.242, "cuda_time_us": 21.056, "pct_cuda_time": 0.30491884374125694, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.056, "pct_cuda_time": 0.30491884374125694, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 513.753, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 706.959, "cuda_time_us": 16.799, "pct_cuda_time": 0.2432718301676185, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034755187356526246, "trace": 
"_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.927, "pct_cuda_time": 0.18720012789908946, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.021316514912002763, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 158.155, "cuda_time_us": 17.664, "pct_cuda_time": 0.25579817894403317, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.52, "pct_cuda_time": 0.224750211572203, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", 
"cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.031047967371830115, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.142, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 492.473, "cuda_time_us": 135.168, "pct_cuda_time": 1.9574121519195582, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.679, "cuda_time_us": 81.888, "pct_cuda_time": 1.1858469926046755, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.888, "pct_cuda_time": 1.1858469926046755, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 119.411, "cuda_time_us": 8.896, "pct_cuda_time": 0.12882589446819062, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.896, "pct_cuda_time": 0.12882589446819062, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 147.936, "cuda_time_us": 44.384, "pct_cuda_time": 0.6427392648466921, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.384, "pct_cuda_time": 0.6427392648466921, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2329.418, "cuda_time_us": 199.80599999999998, "pct_cuda_time": 2.893456235399201, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.178, "cuda_time_us": 3.2, "pct_cuda_time": 0.04634024980870166, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04634024980870166, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1668.073, "cuda_time_us": 59.328, "pct_cuda_time": 0.8591482314533287, "trace": "" }, "children": [ { "entry": { "name": 
"QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 182.283, "cuda_time_us": 21.152, "pct_cuda_time": 0.30630905123551794, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.152, "pct_cuda_time": 0.30630905123551794, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 488.2, "cuda_time_us": 3.808, "pct_cuda_time": 0.05514489727235498, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05514489727235498, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 696.98, "cuda_time_us": 16.64, "pct_cuda_time": 0.24096929900524863, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034755187356526246, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.768, "pct_cuda_time": 0.18489759673671963, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, 
at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.021316514912002763, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 155.317, "cuda_time_us": 17.728, "pct_cuda_time": 0.2567249839402072, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.615, "pct_cuda_time": 0.2261259377383989, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.113, "pct_cuda_time": 0.030599046201808314, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.412, "cuda_time_us": 3.392, "pct_cuda_time": 0.04912066479722376, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.392, "pct_cuda_time": 0.04912066479722376, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 448.126, "cuda_time_us": 133.886, "pct_cuda_time": 1.938847089339947, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.075, "cuda_time_us": 81.246, "pct_cuda_time": 1.1765499799868047, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.246, "pct_cuda_time": 1.1765499799868047, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.439, "cuda_time_us": 9.248, "pct_cuda_time": 0.1339233219471478, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.248, "pct_cuda_time": 0.1339233219471478, 
"trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.925, "cuda_time_us": 43.392, "pct_cuda_time": 0.6283737874059945, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.392, "pct_cuda_time": 0.6283737874059945, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2290.646, "cuda_time_us": 200.127, "pct_cuda_time": 2.8981047417081367, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.771, "cuda_time_us": 3.2, "pct_cuda_time": 0.04634024980870166, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04634024980870166, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1595.035, "cuda_time_us": 59.36, "pct_cuda_time": 0.8596116339514157, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 134.316, "cuda_time_us": 20.608, "pct_cuda_time": 0.2984312087680387, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.608, "pct_cuda_time": 0.2984312087680387, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 448.295, "cuda_time_us": 3.84, "pct_cuda_time": 0.05560829977044199, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.05560829977044199, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 709.947, "cuda_time_us": 16.896, "pct_cuda_time": 0.24467651898994477, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.03429178485843922, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { 
"name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.088, "pct_cuda_time": 0.18953162171758978, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.020853112413915745, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 161.084, "cuda_time_us": 18.016, "pct_cuda_time": 0.2608956064229903, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.84, "pct_cuda_time": 0.2293842365530732, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.176, "pct_cuda_time": 0.031511369869917136, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.825, 
"cuda_time_us": 3.425, "pct_cuda_time": 0.049598548623375996, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.425, "pct_cuda_time": 0.049598548623375996, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 475.489, "cuda_time_us": 134.142, "pct_cuda_time": 1.9425543093246431, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 171.67, "cuda_time_us": 80.991, "pct_cuda_time": 1.1728572413301739, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.991, "pct_cuda_time": 1.1728572413301739, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.442, "cuda_time_us": 8.927, "pct_cuda_time": 0.12927481563821241, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.927, "pct_cuda_time": 0.12927481563821241, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.728, "cuda_time_us": 44.224, "pct_cuda_time": 0.6404222523562568, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.224, "pct_cuda_time": 0.6404222523562568, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2370.797, "cuda_time_us": 201.021, "pct_cuda_time": 2.9110510489984422, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.113, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1697.372, "cuda_time_us": 60.382000000000005, "pct_cuda_time": 0.87441155123407, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.876, "cuda_time_us": 21.279, "pct_cuda_time": 0.3081481798998008, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.279, 
"pct_cuda_time": 0.3081481798998008, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 532.29, "cuda_time_us": 3.712, "pct_cuda_time": 0.05375468977809393, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05375468977809393, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 710.071, "cuda_time_us": 16.959, "pct_cuda_time": 0.24558884265805359, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034755187356526246, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.928, "pct_cuda_time": 0.18721460922715472, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.631, "pct_cuda_time": 0.023619046074372627, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, 
bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 158.735, "cuda_time_us": 18.432000000000002, "pct_cuda_time": 0.2669198388981216, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.32, "pct_cuda_time": 0.23633527402437846, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030584564873743097, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.307, "cuda_time_us": 3.456, "pct_cuda_time": 0.050047469793397796, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.456, "pct_cuda_time": 0.050047469793397796, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 455.759, "cuda_time_us": 133.91899999999998, "pct_cuda_time": 1.939324973166099, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.659, "cuda_time_us": 80.447, "pct_cuda_time": 1.1649793988626946, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.447, "pct_cuda_time": 1.1649793988626946, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.574, "cuda_time_us": 9.184, "pct_cuda_time": 0.13299651695097375, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.184, "pct_cuda_time": 0.13299651695097375, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.221, "cuda_time_us": 44.288, "pct_cuda_time": 0.641349057352431, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.288, "pct_cuda_time": 0.641349057352431, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2268.768, "cuda_time_us": 200.35, "pct_cuda_time": 2.9013340778666805, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.33, "cuda_time_us": 3.232, "pct_cuda_time": 0.04680365230678868, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.04680365230678868, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1601.699, "cuda_time_us": 60.032000000000004, "pct_cuda_time": 0.8693430864112431, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.911, "cuda_time_us": 21.44, "pct_cuda_time": 0.3104796737183011, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.44, "pct_cuda_time": 0.3104796737183011, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 490.57, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 680.641, "cuda_time_us": 16.704, "pct_cuda_time": 0.24189610400142267, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.03429178485843922, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, 
cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.864, "pct_cuda_time": 0.18628780423098068, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.021316514912002763, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 152.636, "cuda_time_us": 18.272, "pct_cuda_time": 0.26460282640768645, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.128, "pct_cuda_time": 0.23355485903585635, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.031047967371830115, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.643, "cuda_time_us": 3.265, "pct_cuda_time": 0.047281536132940914, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, 
int)", "cpu_time_us": 0, "cuda_time_us": 3.265, "pct_cuda_time": 0.047281536132940914, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 438.478, "cuda_time_us": 133.821, "pct_cuda_time": 1.9379058030157075, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 153.737, "cuda_time_us": 80.478, "pct_cuda_time": 1.1654283200327162, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.478, "pct_cuda_time": 1.1654283200327162, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 92.993, "cuda_time_us": 8.96, "pct_cuda_time": 0.12975269946436466, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.96, "pct_cuda_time": 0.12975269946436466, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.966, "cuda_time_us": 44.383, "pct_cuda_time": 0.6427247835186268, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.383, "pct_cuda_time": 0.6427247835186268, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2378.497, "cuda_time_us": 202.334, "pct_cuda_time": 2.9300650327480757, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.231, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1650.731, "cuda_time_us": 60.128, "pct_cuda_time": 0.8707332939055042, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 132.973, "cuda_time_us": 21.984, "pct_cuda_time": 0.3183575161857804, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.984, "pct_cuda_time": 0.3183575161857804, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 492.449, 
"cuda_time_us": 3.711, "pct_cuda_time": 0.053740208450028706, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.711, "pct_cuda_time": 0.053740208450028706, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 715.0, "cuda_time_us": 16.64, "pct_cuda_time": 0.24096929900524863, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034755187356526246, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.768, "pct_cuda_time": 0.18489759673671963, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.021316514912002763, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], 
bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 167.583, "cuda_time_us": 17.793, "pct_cuda_time": 0.25766627026444644, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.681, "pct_cuda_time": 0.22708170539070335, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030584564873743097, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.089, "cuda_time_us": 3.231, "pct_cuda_time": 0.046789170978723454, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.231, "pct_cuda_time": 0.046789170978723454, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 499.852, "cuda_time_us": 135.711, "pct_cuda_time": 1.9652755130589723, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.557, "cuda_time_us": 82.143, "pct_cuda_time": 1.1895397312613065, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.143, "pct_cuda_time": 1.1895397312613065, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.53, "cuda_time_us": 9.152, "pct_cuda_time": 0.13253311445288674, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.152, "pct_cuda_time": 0.13253311445288674, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 170.5, "cuda_time_us": 44.416, "pct_cuda_time": 0.643202667344779, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.416, "pct_cuda_time": 0.643202667344779, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], 
bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2624.147, "cuda_time_us": 199.933, "pct_cuda_time": 2.895295364063484, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.847, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1721.985, "cuda_time_us": 59.519000000000005, "pct_cuda_time": 0.8619141651137857, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.205, "cuda_time_us": 21.088, "pct_cuda_time": 0.30538224623934396, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.088, "pct_cuda_time": 0.30538224623934396, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 476.668, "cuda_time_us": 3.68, "pct_cuda_time": 0.053291287280006906, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.053291287280006906, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 796.226, "cuda_time_us": 17.088, "pct_cuda_time": 0.2474569339784669, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.03429178485843922, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, 
true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.248, "pct_cuda_time": 0.19184863420802487, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.021316514912002763, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 163.541, "cuda_time_us": 17.663, "pct_cuda_time": 0.255783697615968, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.551, "pct_cuda_time": 0.22519913274222486, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030584564873743097, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.231, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 677.776, "cuda_time_us": 133.822, "pct_cuda_time": 1.937920284343773, "trace": "" }, 
"children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.089, "cuda_time_us": 81.471, "pct_cuda_time": 1.179808278801479, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.471, "pct_cuda_time": 1.179808278801479, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.35, "cuda_time_us": 8.64, "pct_cuda_time": 0.12511867448349448, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.64, "pct_cuda_time": 0.12511867448349448, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 363.276, "cuda_time_us": 43.711, "pct_cuda_time": 0.6329933310587994, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.711, "pct_cuda_time": 0.6329933310587994, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2469.202, "cuda_time_us": 200.92400000000004, "pct_cuda_time": 2.909646360176117, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.814, "cuda_time_us": 3.2, "pct_cuda_time": 0.04634024980870166, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04634024980870166, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1754.599, "cuda_time_us": 59.742, "pct_cuda_time": 0.8651435012723295, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.469, "cuda_time_us": 21.375, "pct_cuda_time": 0.30953838739406186, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.375, "pct_cuda_time": 0.30953838739406186, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 521.828, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": 
"_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 768.611, "cuda_time_us": 16.799, "pct_cuda_time": 0.2432718301676185, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034755187356526246, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.896, "pct_cuda_time": 0.1867512067290677, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.503, "pct_cuda_time": 0.02176543608202456, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 179.059, "cuda_time_us": 17.951999999999998, "pct_cuda_time": 0.2599688014268163, "trace": "" }, "children": [ { "entry": { "name": "void 
cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.712, "pct_cuda_time": 0.22753062656072515, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.24, "pct_cuda_time": 0.032438174866091164, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.529, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 482.616, "cuda_time_us": 134.71800000000002, "pct_cuda_time": 1.9508955542902096, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 176.33, "cuda_time_us": 81.919, "pct_cuda_time": 1.1862959137746973, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.919, "pct_cuda_time": 1.1862959137746973, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.629, "cuda_time_us": 8.927, "pct_cuda_time": 0.12927481563821241, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.927, "pct_cuda_time": 0.12927481563821241, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.336, "cuda_time_us": 43.872, "pct_cuda_time": 0.6353248248772998, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.872, "pct_cuda_time": 0.6353248248772998, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2273.996, "cuda_time_us": 201.28000000000003, "pct_cuda_time": 2.9148017129673347, "trace": "" }, "children": [ { "entry": { "name": 
"RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.694, "cuda_time_us": 3.168, "pct_cuda_time": 0.045876847310614643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.045876847310614643, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1596.446, "cuda_time_us": 59.489000000000004, "pct_cuda_time": 0.8614797252718291, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.704, "cuda_time_us": 21.312, "pct_cuda_time": 0.3086260637259531, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.312, "pct_cuda_time": 0.3086260637259531, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 468.263, "cuda_time_us": 3.648, "pct_cuda_time": 0.052827884781919895, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.052827884781919895, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 685.248, "cuda_time_us": 16.8, "pct_cuda_time": 0.24328631149568375, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034755187356526246, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.96, "pct_cuda_time": 0.18767801172524173, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.020853112413915745, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 155.461, "cuda_time_us": 17.729, "pct_cuda_time": 0.2567394652682724, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.584, "pct_cuda_time": 0.2256770165683771, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.145, "pct_cuda_time": 0.031062448699895332, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.097, "cuda_time_us": 3.328, "pct_cuda_time": 0.048193859801049725, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.328, "pct_cuda_time": 0.048193859801049725, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 450.787, "cuda_time_us": 135.29500000000002, "pct_cuda_time": 1.9592512805838413, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.109, "cuda_time_us": 82.24, "pct_cuda_time": 1.1909444200836325, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.24, "pct_cuda_time": 1.1909444200836325, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.289, "cuda_time_us": 8.832, "pct_cuda_time": 0.12789908947201659, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.832, "pct_cuda_time": 0.12789908947201659, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.805, "cuda_time_us": 44.223, "pct_cuda_time": 0.6404077710281917, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.223, "pct_cuda_time": 0.6404077710281917, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2455.633, "cuda_time_us": 199.997, "pct_cuda_time": 2.896222169059658, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.758, "cuda_time_us": 3.233, "pct_cuda_time": 0.046818133634853897, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.233, "pct_cuda_time": 0.046818133634853897, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1742.655, "cuda_time_us": 59.551, "pct_cuda_time": 0.8623775676118727, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.49, "cuda_time_us": 21.28, "pct_cuda_time": 0.30816266122786606, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.28, "pct_cuda_time": 0.30816266122786606, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 512.855, "cuda_time_us": 3.711, "pct_cuda_time": 0.053740208450028706, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.711, "pct_cuda_time": 0.053740208450028706, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 752.751, "cuda_time_us": 16.768, "pct_cuda_time": 
0.24282290899759668, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034755187356526246, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.896, "pct_cuda_time": 0.1867512067290677, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.021316514912002763, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 181.591, "cuda_time_us": 17.792, "pct_cuda_time": 0.25765178893638124, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.648, "pct_cuda_time": 0.2266038215645511, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 
4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.031047967371830115, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.961, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 464.475, "cuda_time_us": 133.917, "pct_cuda_time": 1.9392960105099686, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 161.46, "cuda_time_us": 81.662, "pct_cuda_time": 1.182574212461936, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.662, "pct_cuda_time": 1.182574212461936, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.363, "cuda_time_us": 8.864, "pct_cuda_time": 0.1283624919701036, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.864, "pct_cuda_time": 0.1283624919701036, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.25, "cuda_time_us": 43.391, "pct_cuda_time": 0.6283593060779292, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.391, "pct_cuda_time": 0.6283593060779292, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2341.925, "cuda_time_us": 199.29399999999998, "pct_cuda_time": 2.8860417954298088, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.703, "cuda_time_us": 3.233, "pct_cuda_time": 0.046818133634853897, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 
const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.233, "pct_cuda_time": 0.046818133634853897, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1653.972, "cuda_time_us": 59.038, "pct_cuda_time": 0.8549486463144151, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 142.912, "cuda_time_us": 20.736, "pct_cuda_time": 0.30028481876038676, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.736, "pct_cuda_time": 0.30028481876038676, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 472.358, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 708.424, "cuda_time_us": 16.639, "pct_cuda_time": 0.2409548176771834, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.03429178485843922, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.831, "pct_cuda_time": 0.18580992040482844, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, 
None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.020853112413915745, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.419, "cuda_time_us": 18.047, "pct_cuda_time": 0.26134452759301213, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.903, "pct_cuda_time": 0.23029656022118206, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.031047967371830115, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.09, "cuda_time_us": 3.392, "pct_cuda_time": 0.04912066479722376, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.392, "pct_cuda_time": 0.04912066479722376, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 459.467, "cuda_time_us": 133.631, "pct_cuda_time": 1.9351543506833162, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.701, "cuda_time_us": 81.663, "pct_cuda_time": 1.182588693790001, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.663, "pct_cuda_time": 1.182588693790001, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], 
bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.325, "cuda_time_us": 9.024, "pct_cuda_time": 0.13067950446053866, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.024, "pct_cuda_time": 0.13067950446053866, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.496, "cuda_time_us": 42.944, "pct_cuda_time": 0.6218861524327763, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 42.944, "pct_cuda_time": 0.6218861524327763, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2268.375, "cuda_time_us": 200.443, "pct_cuda_time": 2.9026808413767458, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.738, "cuda_time_us": 3.231, "pct_cuda_time": 0.046789170978723454, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.231, "pct_cuda_time": 0.046789170978723454, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1585.389, "cuda_time_us": 59.93300000000001, "pct_cuda_time": 0.8679094349327865, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.696, "cuda_time_us": 21.376, "pct_cuda_time": 0.3095528687221271, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.376, "pct_cuda_time": 0.3095528687221271, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 444.627, "cuda_time_us": 3.679, "pct_cuda_time": 0.05327680595194168, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.679, "pct_cuda_time": 0.05327680595194168, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 692.677, "cuda_time_us": 16.702, "pct_cuda_time": 0.24186714134529225, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", 
"cpu_time_us": 0, "cuda_time_us": 2.367, "pct_cuda_time": 0.034277303530374004, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.831, "pct_cuda_time": 0.18580992040482844, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.02177991741008978, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 165.672, "cuda_time_us": 18.176, "pct_cuda_time": 0.2632126189134254, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.84, "pct_cuda_time": 0.2293842365530732, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, 
__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.336, "pct_cuda_time": 0.03382838236035221, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.492, "cuda_time_us": 3.232, "pct_cuda_time": 0.04680365230678868, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.04680365230678868, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 459.8, "cuda_time_us": 134.047, "pct_cuda_time": 1.9411785831584472, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.145, "cuda_time_us": 81.568, "pct_cuda_time": 1.1812129676238052, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.568, "pct_cuda_time": 1.1812129676238052, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.963, "cuda_time_us": 8.832, "pct_cuda_time": 0.12789908947201659, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.832, "pct_cuda_time": 0.12789908947201659, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 140.104, "cuda_time_us": 43.647, "pct_cuda_time": 0.6320665260626254, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.647, "pct_cuda_time": 0.6320665260626254, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2323.88, "cuda_time_us": 200.47700000000003, "pct_cuda_time": 2.9031732065309637, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.206, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1652.395, "cuda_time_us": 59.999, 
"pct_cuda_time": 0.8688652025850909, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.618, "cuda_time_us": 20.863, "pct_cuda_time": 0.3021239474246696, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.863, "pct_cuda_time": 0.3021239474246696, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 506.306, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 702.638, "cuda_time_us": 17.12, "pct_cuda_time": 0.2479203364765539, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.03429178485843922, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.312, "pct_cuda_time": 0.1927754392041989, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": 
{ "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.020853112413915745, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 156.642, "cuda_time_us": 18.4, "pct_cuda_time": 0.2664564364000345, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.256, "pct_cuda_time": 0.23540846902820445, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.031047967371830115, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.998, "cuda_time_us": 3.424, "pct_cuda_time": 0.04958406729531077, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.424, "pct_cuda_time": 0.04958406729531077, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 450.247, "cuda_time_us": 133.79000000000002, "pct_cuda_time": 1.9374568818456863, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.137, "cuda_time_us": 80.415, "pct_cuda_time": 1.1645159963646077, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.415, "pct_cuda_time": 1.1645159963646077, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.805, "cuda_time_us": 8.768, "pct_cuda_time": 0.12697228447584255, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 
const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.768, "pct_cuda_time": 0.12697228447584255, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 140.554, "cuda_time_us": 44.607, "pct_cuda_time": 0.6459686010052359, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.607, "pct_cuda_time": 0.6459686010052359, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2218.95, "cuda_time_us": 199.87100000000004, "pct_cuda_time": 2.894397521723441, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.236, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.264, "pct_cuda_time": 0.04726705480487569, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1559.685, "cuda_time_us": 59.999, "pct_cuda_time": 0.8688652025850909, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.22, "cuda_time_us": 21.888, "pct_cuda_time": 0.3169673086915194, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.888, "pct_cuda_time": 0.3169673086915194, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 470.611, "cuda_time_us": 3.648, "pct_cuda_time": 0.052827884781919895, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.052827884781919895, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 665.135, "cuda_time_us": 16.64, "pct_cuda_time": 0.24096929900524863, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034755187356526246, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], 
bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.768, "pct_cuda_time": 0.18489759673671963, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.021316514912002763, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 149.248, "cuda_time_us": 17.823, "pct_cuda_time": 0.258100710106403, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.679, "pct_cuda_time": 0.2270527427345729, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.031047967371830115, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" 
}, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.856, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 443.559, "cuda_time_us": 133.312, "pct_cuda_time": 1.9305348070305113, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 154.308, "cuda_time_us": 81.28, "pct_cuda_time": 1.1770423451410221, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.28, "pct_cuda_time": 1.1770423451410221, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.001, "cuda_time_us": 8.896, "pct_cuda_time": 0.12882589446819062, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.896, "pct_cuda_time": 0.12882589446819062, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.794, "cuda_time_us": 43.136, "pct_cuda_time": 0.6246665674212984, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.136, "pct_cuda_time": 0.6246665674212984, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2708.386, "cuda_time_us": 200.60500000000002, "pct_cuda_time": 2.9050268165233115, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.268, "cuda_time_us": 3.328, "pct_cuda_time": 0.048193859801049725, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.328, "pct_cuda_time": 0.048193859801049725, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2008.808, "cuda_time_us": 59.295, "pct_cuda_time": 0.8586703476271766, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 173.57, "cuda_time_us": 20.832, "pct_cuda_time": 0.3016750262546478, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.832, "pct_cuda_time": 0.3016750262546478, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 533.373, "cuda_time_us": 3.648, "pct_cuda_time": 0.052827884781919895, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.052827884781919895, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 981.347, "cuda_time_us": 17.023, "pct_cuda_time": 0.24651564765422762, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.034755187356526246, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.055, "pct_cuda_time": 0.18905373789143753, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.568, "pct_cuda_time": 0.022706722406263813, "trace": "fill_(int32[1], 0) <- 
zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 172.305, "cuda_time_us": 17.792, "pct_cuda_time": 0.25765178893638124, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.552, "pct_cuda_time": 0.22521361407029009, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.24, "pct_cuda_time": 0.032438174866091164, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.303, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 475.555, "cuda_time_us": 134.686, "pct_cuda_time": 1.9504321517921226, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.464, "cuda_time_us": 81.119, "pct_cuda_time": 1.174710851322522, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.119, "pct_cuda_time": 1.174710851322522, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.236, "cuda_time_us": 8.896, "pct_cuda_time": 0.12882589446819062, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.896, "pct_cuda_time": 0.12882589446819062, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 
144.018, "cuda_time_us": 44.671, "pct_cuda_time": 0.6468954060014099, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.671, "pct_cuda_time": 0.6468954060014099, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2335.324, "cuda_time_us": 201.11599999999999, "pct_cuda_time": 2.912426775164638, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.946, "cuda_time_us": 3.231, "pct_cuda_time": 0.046789170978723454, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.231, "pct_cuda_time": 0.046789170978723454, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1660.47, "cuda_time_us": 60.48, "pct_cuda_time": 0.8758307213844614, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.004, "cuda_time_us": 21.984, "pct_cuda_time": 0.3183575161857804, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.984, "pct_cuda_time": 0.3183575161857804, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 481.09, "cuda_time_us": 3.648, "pct_cuda_time": 0.052827884781919895, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.052827884781919895, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 707.608, "cuda_time_us": 16.799, "pct_cuda_time": 0.2432718301676185, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.0361453948507873, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, 
false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.832, "pct_cuda_time": 0.18582440173289366, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.471, "pct_cuda_time": 0.021302033583937546, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 188.359, "cuda_time_us": 18.049, "pct_cuda_time": 0.2613734902491426, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.904, "pct_cuda_time": 0.23031104154924725, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.145, "pct_cuda_time": 0.031062448699895332, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.166, "cuda_time_us": 3.328, "pct_cuda_time": 0.048193859801049725, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.328, "pct_cuda_time": 0.048193859801049725, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 453.659, "cuda_time_us": 134.077, "pct_cuda_time": 1.9416130230004036, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 161.304, "cuda_time_us": 81.694, "pct_cuda_time": 1.183037614960023, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.694, "pct_cuda_time": 1.183037614960023, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.714, "cuda_time_us": 8.576, "pct_cuda_time": 0.12419186948732046, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.576, "pct_cuda_time": 0.12419186948732046, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.312, "cuda_time_us": 43.807, "pct_cuda_time": 0.6343835385530605, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.807, "pct_cuda_time": 0.6343835385530605, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2322.997, "cuda_time_us": 200.19100000000003, "pct_cuda_time": 2.899031546704311, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.804, "cuda_time_us": 3.232, "pct_cuda_time": 0.04680365230678868, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.04680365230678868, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1667.691, "cuda_time_us": 58.785000000000004, "pct_cuda_time": 0.8512848703139148, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.261, "cuda_time_us": 20.608, "pct_cuda_time": 0.2984312087680387, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.608, "pct_cuda_time": 0.2984312087680387, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- 
matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 474.805, "cuda_time_us": 3.584, "pct_cuda_time": 0.05190107978574586, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.584, "pct_cuda_time": 0.05190107978574586, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 695.782, "cuda_time_us": 16.865000000000002, "pct_cuda_time": 0.24422759781992298, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.0361453948507873, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.896, "pct_cuda_time": 0.1867512067290677, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.473, "pct_cuda_time": 0.021330996240067984, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 
17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 223.366, "cuda_time_us": 17.728, "pct_cuda_time": 0.2567249839402072, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.616, "pct_cuda_time": 0.2261404190664641, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030584564873743097, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.536, "cuda_time_us": 3.456, "pct_cuda_time": 0.050047469793397796, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.456, "pct_cuda_time": 0.050047469793397796, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 444.732, "cuda_time_us": 134.71800000000002, "pct_cuda_time": 1.9508955542902096, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.079, "cuda_time_us": 81.631, "pct_cuda_time": 1.182125291291914, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.631, "pct_cuda_time": 1.182125291291914, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.772, "cuda_time_us": 8.896, "pct_cuda_time": 0.12882589446819062, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.896, "pct_cuda_time": 0.12882589446819062, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.19, "cuda_time_us": 44.191, "pct_cuda_time": 0.6399443685301047, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.191, "pct_cuda_time": 0.6399443685301047, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2368.312, "cuda_time_us": 201.981, "pct_cuda_time": 2.9249531239410533, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.965, "cuda_time_us": 3.328, "pct_cuda_time": 0.048193859801049725, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.328, "pct_cuda_time": 0.048193859801049725, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1670.944, "cuda_time_us": 60.159000000000006, "pct_cuda_time": 0.871182215075526, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.149, "cuda_time_us": 21.408, "pct_cuda_time": 0.31001627122021413, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.408, "pct_cuda_time": 0.31001627122021413, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 476.157, "cuda_time_us": 3.615, "pct_cuda_time": 0.05235000095576766, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.615, "pct_cuda_time": 0.05235000095576766, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 760.619, "cuda_time_us": 17.088, "pct_cuda_time": 0.2474569339784669, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.752, "pct_cuda_time": 0.03985261483548343, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> 
>, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.832, "pct_cuda_time": 0.18582440173289366, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.02177991741008978, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 158.138, "cuda_time_us": 18.048000000000002, "pct_cuda_time": 0.2613590089210774, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.936, "pct_cuda_time": 0.23077444404733427, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030584564873743097, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.954, "cuda_time_us": 3.424, "pct_cuda_time": 0.04958406729531077, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, 
float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.424, "pct_cuda_time": 0.04958406729531077, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 460.542, "cuda_time_us": 135.07, "pct_cuda_time": 1.9559929817691664, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.293, "cuda_time_us": 81.855, "pct_cuda_time": 1.1853691087785234, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.855, "pct_cuda_time": 1.1853691087785234, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.393, "cuda_time_us": 9.184, "pct_cuda_time": 0.13299651695097375, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.184, "pct_cuda_time": 0.13299651695097375, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.136, "cuda_time_us": 44.031, "pct_cuda_time": 0.6376273560396696, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.031, "pct_cuda_time": 0.6376273560396696, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2270.224, "cuda_time_us": 199.39, "pct_cuda_time": 2.88743200292407, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.634, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.04773045730296271, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1605.784, "cuda_time_us": 58.913000000000004, "pct_cuda_time": 0.8531384803062628, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 133.18, "cuda_time_us": 20.64, "pct_cuda_time": 0.2988946112661257, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.64, "pct_cuda_time": 0.2988946112661257, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", 
"cpu_time_us": 465.253, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05236448228383288, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 701.208, "cuda_time_us": 16.865000000000002, "pct_cuda_time": 0.24422759781992298, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.497, "pct_cuda_time": 0.03615987617885252, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 12.928, "pct_cuda_time": 0.18721460922715472, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.020853112413915745, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], 
bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 154.708, "cuda_time_us": 17.792, "pct_cuda_time": 0.25765178893638124, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.648, "pct_cuda_time": 0.2266038215645511, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.031047967371830115, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.806, "cuda_time_us": 3.488, "pct_cuda_time": 0.05051087229148481, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.488, "pct_cuda_time": 0.05051087229148481, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 446.614, "cuda_time_us": 133.69299999999998, "pct_cuda_time": 1.9360521930233594, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 154.623, "cuda_time_us": 81.663, "pct_cuda_time": 1.182588693790001, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.663, "pct_cuda_time": 1.182588693790001, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.69, "cuda_time_us": 8.767, "pct_cuda_time": 0.12695780314777733, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.767, "pct_cuda_time": 0.12695780314777733, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.324, "cuda_time_us": 43.263, "pct_cuda_time": 0.6265056960855813, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.263, "pct_cuda_time": 0.6265056960855813, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], 
bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.178, "cuda_time_us": 3.2, "pct_cuda_time": 0.04634024980870166, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04634024980870166, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 555.919, "cuda_time_us": 352.187, "pct_cuda_time": 5.100135487305379, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 5.471, "pct_cuda_time": 0.07922734584481463, "trace": "index_select(bfloat16[8, 4096], 0, int64[8])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010658257456001382, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[8, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 345.98, "pct_cuda_time": 5.010249884004563, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[8, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 3870.667, "cuda_time_us": 120.443, "pct_cuda_time": 1.7441745961592043, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010658257456001382, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.010643776127936162, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.011121659954088398, "trace": "copy_(int32[8], int32[8], True) <- _to_copy(int32[8], 3, 0, None, None, True, None) <- to(int32[8], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.011121659954088398, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.011585062452175415, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, 
None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.011585062452175415, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.011585062452175415, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 4.639, "pct_cuda_time": 0.06717888089455219, "trace": "copy_(float32[8, 128256], bfloat16[8, 128256], False) <- _to_copy(bfloat16[8, 128256], 6, None, None, None, False, None) <- to(bfloat16[8, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 6.399, "pct_cuda_time": 0.0926660182893381, "trace": "div_(float32[8, 128256], bfloat16[8, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 35.295, "pct_cuda_time": 0.5111184740619141, "trace": "_softmax(float32[8, 128256], -1, False) <- softmax(float32[8, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 28.575, "pct_cuda_time": 0.41380394946364063, "trace": "_log_softmax(float32[8, 128256], -1, False) <- log_softmax(float32[8, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 1.728, "pct_cuda_time": 0.025023734896698898, "trace": "copy_(int64[8], int32[8], False) <- _to_copy(int32[8], 4, None, None, None, False, None) <- to(int32[8], 4, False, 
False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 7.584, "pct_cuda_time": 0.10982639204662292, "trace": "index(float32[8, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 28.32, "pct_cuda_time": 0.4101112108070097, "trace": "argmax(float32[8, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.0361453948507873, "trace": "copy_(int64[8], int64[8], False) <- _to_copy(int64[8], 4, 0, None, None, False, None) <- to(int64[8], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] } }
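
The `summary_stats` entries above form a nested module/kernel tree: each node is an object with an `entry` (name, `cpu_time_us`, `cuda_time_us`, `pct_cuda_time`, `trace`) and a `children` list. Below is a minimal post-processing sketch for a dump of this shape. The file name `layerwise_profile.json` and the assumption that every phase other than `context` (e.g. `prefill`, and a `decode` section of the same layout) carries a `summary_stats` list are illustrative assumptions, not guarantees from the captured output.

```python
# Sketch: aggregate CUDA time per top-level module from a layerwise profile
# dump shaped like the JSON above. The path "layerwise_profile.json" and the
# phase-iteration logic are assumptions for illustration only.
import json
from collections import defaultdict


def leaf_cuda_time_us(node):
    """Sum cuda_time_us over the leaf kernels beneath a node.

    A node looks like {"entry": {...timings...}, "children": [...]}; only
    leaves (no children) are counted so module rows that already roll up
    their kernels are not double counted.
    """
    if not node["children"]:
        return node["entry"]["cuda_time_us"]
    return sum(leaf_cuda_time_us(child) for child in node["children"])


def summarize(path="layerwise_profile.json"):
    with open(path) as f:
        profile = json.load(f)

    totals = defaultdict(float)
    # Phases other than "context" (e.g. "prefill", "decode") are assumed to
    # carry a "summary_stats" list of module trees like the ones shown above.
    for phase, data in profile.items():
        if phase == "context" or not isinstance(data, dict):
            continue
        for module in data.get("summary_stats", []):
            name = module["entry"]["name"]
            totals[f"{phase}/{name}"] += leaf_cuda_time_us(module)

    for name, us in sorted(totals.items(), key=lambda kv: -kv[1]):
        print(f"{us:10.3f} us  {name}")


if __name__ == "__main__":
    summarize()
```

Run against a saved dump, this prints one line per top-level module per phase, sorted by total leaf-kernel CUDA time, which makes it easy to see at a glance that rows such as `LogitsProcessor` and the `LlamaMLP` GEMMs dominate the GPU time in the trace above.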