{ "context": { "python_version": "3.12.9 | packaged by Anaconda, Inc. | (main, Feb 6 2025, 18:56:27) [GCC 11.2.0]", "torch_version": "2.5.1+cu124", "engine_args": { "model": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "served_model_name": null, "tokenizer": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "task": "auto", "skip_tokenizer_init": false, "tokenizer_mode": "auto", "trust_remote_code": false, "allowed_local_media_path": null, "download_dir": null, "load_format": "dummy", "config_format": "auto", "dtype": "auto", "kv_cache_dtype": "auto", "seed": 0, "max_model_len": null, "distributed_executor_backend": null, "pipeline_parallel_size": 1, "tensor_parallel_size": 1, "max_parallel_loading_workers": null, "block_size": null, "enable_prefix_caching": false, "disable_sliding_window": false, "use_v2_block_manager": true, "swap_space": 4, "cpu_offload_gb": 0, "gpu_memory_utilization": 0.9, "max_num_batched_tokens": 8000, "max_num_partial_prefills": 1, "max_long_partial_prefills": 1, "long_prefill_token_threshold": 0, "max_num_seqs": 256, "max_logprobs": 20, "disable_log_stats": false, "revision": null, "code_revision": null, "rope_scaling": null, "rope_theta": null, "hf_overrides": null, "tokenizer_revision": null, "quantization": null, "enforce_eager": true, "max_seq_len_to_capture": 8192, "disable_custom_all_reduce": false, "tokenizer_pool_size": 0, "tokenizer_pool_type": "ray", "tokenizer_pool_extra_config": null, "limit_mm_per_prompt": null, "mm_processor_kwargs": null, "disable_mm_preprocessor_cache": false, "enable_lora": false, "enable_lora_bias": false, "max_loras": 1, "max_lora_rank": 16, "enable_prompt_adapter": false, "max_prompt_adapters": 1, "max_prompt_adapter_token": 0, "fully_sharded_loras": false, "lora_extra_vocab_size": 256, "long_lora_scaling_factors": null, "lora_dtype": "auto", "max_cpu_loras": null, "device": "auto", "num_scheduler_steps": 1, "multi_step_stream_outputs": true, "ray_workers_use_nsight": false, "num_gpu_blocks_override": null, "num_lookahead_slots": 0, "model_loader_extra_config": null, "ignore_patterns": [], "preemption_mode": null, "scheduler_delay_factor": 0.0, "enable_chunked_prefill": null, "guided_decoding_backend": "xgrammar", "logits_processor_pattern": null, "speculative_model": null, "speculative_model_quantization": null, "speculative_draft_tensor_parallel_size": null, "num_speculative_tokens": null, "speculative_disable_mqa_scorer": false, "speculative_max_model_len": null, "speculative_disable_by_batch_size": null, "ngram_prompt_lookup_max": null, "ngram_prompt_lookup_min": null, "spec_decoding_acceptance_method": "rejection_sampler", "typical_acceptance_sampler_posterior_threshold": null, "typical_acceptance_sampler_posterior_alpha": null, "qlora_adapter_name_or_path": null, "disable_logprobs_during_spec_decoding": null, "otlp_traces_endpoint": null, "collect_detailed_traces": null, "disable_async_output_proc": false, "scheduling_policy": "fcfs", "scheduler_cls": "vllm.core.scheduler.Scheduler", "override_neuron_config": null, "override_pooler_config": null, "compilation_config": null, "worker_cls": "auto", "kv_transfer_config": null, "generation_config": null, "override_generation_config": null, "enable_sleep_mode": false, "model_impl": "auto", "calculate_kv_scales": false, "additional_config": null }, "prompt_len": 0, "batch_size": 6, "num_steps": 2, "complete_num_requests_per_step": null, "save_chrome_traces_folder": null }, "prefill": { "metadata": { "num_running_seqs": null }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", 
"cuda_time_us": 66330.022, "pct_cuda_time": 99.2671979053835, "invocations": 1 }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 87.999, "pct_cuda_time": 0.13169623475288222, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cuda_time_us": 87.999, "pct_cuda_time": 0.13169623475288222, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 66208.583, "pct_cuda_time": 99.08545653272978, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 2059.4, "pct_cuda_time": 3.0820262258671764, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 48.992, "pct_cuda_time": 0.0733197187810453, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 2010.4080000000001, "pct_cuda_time": 3.0087065070861314, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 15219.695000000002, "pct_cuda_time": 22.777264805137197, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 6727.779000000001, "pct_cuda_time": 10.068559444419952, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 25.08800000000001, "pct_cuda_time": 0.03754582594666202, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 6702.690999999999, "pct_cuda_time": 10.031013618473288, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 1296.6199999999997, "pct_cuda_time": 1.9404762770631725, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 1296.6199999999997, "pct_cuda_time": 1.9404762770631725, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 2349.8229999999994, "pct_cuda_time": 3.516663160214569, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 517.0200000000001, "pct_cuda_time": 0.7737541027958862, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 1787.557, "pct_cuda_time": 2.675195471609426, "invocations": 32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 45.24600000000001, "pct_cuda_time": 0.06771358580925818, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 4845.473, "pct_cuda_time": 7.2515659234395, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 23.679000000000006, "pct_cuda_time": 0.03543716568044522, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 4821.794, "pct_cuda_time": 7.216128757759053, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 48929.48799999999, "pct_cuda_time": 73.22616550172539, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 30631.078, "pct_cuda_time": 45.84140318664811, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 23.650000000000006, "pct_cuda_time": 0.03539376529171542, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 30607.427999999993, "pct_cuda_time": 45.80600942135638, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 4211.269, "pct_cuda_time": 6.302438332612138, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 4211.269, "pct_cuda_time": 6.302438332612138, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 14087.141000000005, "pct_cuda_time": 21.082323982465173, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 23.714000000000006, "pct_cuda_time": 0.0354895454599467, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 14063.426999999998, "pct_cuda_time": 21.046834437005217, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 33.44, "pct_cuda_time": 0.05004513790084411, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, 
int)", "cuda_time_us": 33.44, "pct_cuda_time": 0.05004513790084411, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 364.79499999999996, "pct_cuda_time": 0.5459394760926564, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 5.216, "pct_cuda_time": 0.00780608371084937, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "invocations": 1 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 358.811, "pct_cuda_time": 0.5369840303630317, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 124.861, "pct_cuda_time": 0.18686261852384264, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 12.256000000000002, "pct_cuda_time": 0.018341902216290243, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 4.383, "pct_cuda_time": 0.006559444958714108, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 5.6, "pct_cuda_time": 0.008380764720237053, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 35.679, "pct_cuda_time": 0.05339594722381033, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 28.383, "pct_cuda_time": 0.04247700804544434, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 1.952, "pct_cuda_time": 0.0029212951310540586, "invocations": 1 }, "children": [] 
}, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cuda_time_us": 6.176, "pct_cuda_time": 0.00924278623431858, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 27.84, "pct_cuda_time": 0.04166437318060707, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 2.592, "pct_cuda_time": 0.003879096813366865, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 82312.768, "cuda_time_us": 66330.022, "pct_cuda_time": 99.2671979053835, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 296.797, "cuda_time_us": 87.999, "pct_cuda_time": 0.13169623475288222, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 87.999, "pct_cuda_time": 0.13169623475288222, "trace": "index_select(bfloat16[128256, 4096], 0, int64[3072]) <- embedding(bfloat16[128256, 4096], int64[3072], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 4054.702, "cuda_time_us": 2090.084, "pct_cuda_time": 3.1279468302735607, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 235.109, "cuda_time_us": 48.992, "pct_cuda_time": 0.0733197187810453, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 48.992, "pct_cuda_time": 0.0733197187810453, "trace": "_C::rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2980.23, "cuda_time_us": 481.9459999999999, "pct_cuda_time": 0.7212635774748869, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 396.545, "cuda_time_us": 216.06099999999998, "pct_cuda_time": 0.3233493582534175, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", 
"cpu_time_us": 0, "cuda_time_us": 215.325, "pct_cuda_time": 0.3222478863187578, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 955.362, "cuda_time_us": 40.544, "pct_cuda_time": 0.060676736574516256, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.544, "pct_cuda_time": 0.060676736574516256, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1052.888, "cuda_time_us": 73.183, "pct_cuda_time": 0.10952312580734078, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.16, "pct_cuda_time": 0.024184492478398358, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.743, "pct_cuda_time": 0.0834230299643168, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.0019156033646256121, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- 
_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 311.571, "cuda_time_us": 152.158, "pct_cuda_time": 0.22771435683961241, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 151.422, "pct_cuda_time": 0.22661288490495268, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 117.137, "cuda_time_us": 31.232, "pct_cuda_time": 0.04674072209686494, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 31.232, "pct_cuda_time": 0.04674072209686494, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 602.204, "cuda_time_us": 1527.914, "pct_cuda_time": 2.286622811920764, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 196.982, "cuda_time_us": 956.1469999999999, "pct_cuda_time": 1.4309362580286602, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 955.411, "pct_cuda_time": 1.4298347860940006, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 141.342, "cuda_time_us": 131.806, "pct_cuda_time": 0.1972562633420652, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.806, "pct_cuda_time": 0.1972562633420652, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": 
"RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 183.235, "cuda_time_us": 439.961, "pct_cuda_time": 0.6584302905500383, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0010999753695311132, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.226, "pct_cuda_time": 0.6573303151805072, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2699.276, "cuda_time_us": 2066.341, "pct_cuda_time": 3.0924138844248845, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.124, "cuda_time_us": 32.448, "pct_cuda_time": 0.04856054529325927, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.448, "pct_cuda_time": 0.04856054529325927, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1932.086, "cuda_time_us": 475.067, "pct_cuda_time": 0.7109687059551529, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 168.183, "cuda_time_us": 209.405, "pct_cuda_time": 0.31338822075736433, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 208.669, "pct_cuda_time": 0.31228674882270463, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 539.5, "cuda_time_us": 40.8, "pct_cuda_time": 0.06105985724744139, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.8, "pct_cuda_time": 0.06105985724744139, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 835.148, "cuda_time_us": 73.344, "pct_cuda_time": 0.10976407279304756, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 
const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.064, "pct_cuda_time": 0.024040822226051434, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.775, "pct_cuda_time": 0.08347092004843243, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.505, "pct_cuda_time": 0.002252330518563708, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 225.536, "cuda_time_us": 151.518, "pct_cuda_time": 0.22675655515729962, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 
150.782, "pct_cuda_time": 0.22565508322263994, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 92.746, "cuda_time_us": 30.687, "pct_cuda_time": 0.045925094101770446, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.687, "pct_cuda_time": 0.045925094101770446, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 494.818, "cuda_time_us": 1528.139, "pct_cuda_time": 2.2869595390747017, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.81, "cuda_time_us": 956.306, "pct_cuda_time": 1.43117421188411, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0010999753695311132, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 955.571, "pct_cuda_time": 1.4300742365145789, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 104.497, "cuda_time_us": 131.935, "pct_cuda_time": 0.19744932024365636, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.935, "pct_cuda_time": 0.19744932024365636, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 168.236, "cuda_time_us": 439.89799999999997, "pct_cuda_time": 0.6583360069469355, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.162, "pct_cuda_time": 0.6572345350122758, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2590.836, "cuda_time_us": 2069.765, "pct_cuda_time": 3.0975381234252577, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.09, 
"cuda_time_us": 32.959, "pct_cuda_time": 0.049325290073980906, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.959, "pct_cuda_time": 0.049325290073980906, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1841.788, "cuda_time_us": 478.01, "pct_cuda_time": 0.7153730971286633, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 161.026, "cuda_time_us": 211.422, "pct_cuda_time": 0.3164067926217783, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0011029684997883409, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 210.685, "pct_cuda_time": 0.31530382412198993, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 544.109, "cuda_time_us": 40.383, "pct_cuda_time": 0.060435789588809456, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.383, "pct_cuda_time": 0.060435789588809456, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 766.862, "cuda_time_us": 74.30300000000001, "pct_cuda_time": 0.1111992787513882, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.512, "pct_cuda_time": 0.024711283403670396, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, 
cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.287, "pct_cuda_time": 0.0842371613942827, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0022508339534350946, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 202.956, "cuda_time_us": 151.902, "pct_cuda_time": 0.22733123616668727, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 151.166, "pct_cuda_time": 0.2262297642320276, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.809, "cuda_time_us": 31.551, "pct_cuda_time": 0.04721812637289273, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 31.551, "pct_cuda_time": 0.04721812637289273, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 499.094, "cuda_time_us": 1527.245, "pct_cuda_time": 2.285621609849721, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 166.616, "cuda_time_us": 956.3389999999999, "pct_cuda_time": 1.4312235985333541, 
"trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 955.603, "pct_cuda_time": 1.4301221265986943, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.41, "cuda_time_us": 131.935, "pct_cuda_time": 0.19744932024365636, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.935, "pct_cuda_time": 0.19744932024365636, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 177.162, "cuda_time_us": 438.971, "pct_cuda_time": 0.6569486910727107, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0011029684997883409, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 438.234, "pct_cuda_time": 0.6558457225729223, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2490.857, "cuda_time_us": 2068.4179999999997, "pct_cuda_time": 3.0955222501970145, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.01, "cuda_time_us": 32.703, "pct_cuda_time": 0.048942169401055786, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.703, "pct_cuda_time": 0.048942169401055786, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1781.754, "cuda_time_us": 477.687, "pct_cuda_time": 0.714889706592121, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.35, "cuda_time_us": 211.453, "pct_cuda_time": 0.3164531861407653, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.888, "pct_cuda_time": 0.002825514962822778, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] 
}, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.565, "pct_cuda_time": 0.31362767117794255, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 501.376, "cuda_time_us": 40.608, "pct_cuda_time": 0.06077251674274754, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.608, "pct_cuda_time": 0.06077251674274754, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 760.559, "cuda_time_us": 74.077, "pct_cuda_time": 0.11086105503232146, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.447, "pct_cuda_time": 0.024614006670310502, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.351, "pct_cuda_time": 0.08433294156251396, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.279, 
"pct_cuda_time": 0.0019141067994969984, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 202.205, "cuda_time_us": 151.549, "pct_cuda_time": 0.22680294867628664, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0010999753695311132, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.814, "pct_cuda_time": 0.22570297330675554, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 93.533, "cuda_time_us": 30.4, "pct_cuda_time": 0.04549557990985829, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.4, "pct_cuda_time": 0.04549557990985829, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 474.551, "cuda_time_us": 1527.6279999999997, "pct_cuda_time": 2.28619479429398, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.595, "cuda_time_us": 954.675, "pct_cuda_time": 1.4287333141593408, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 953.939, "pct_cuda_time": 1.4276318422246812, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.53, "cuda_time_us": 132.447, "pct_cuda_time": 0.1982155615895066, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 132.447, "pct_cuda_time": 0.1982155615895066, "trace": 
"_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 155.084, "cuda_time_us": 440.506, "pct_cuda_time": 0.6592459185451327, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.77, "pct_cuda_time": 0.658144446610473, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2522.55, "cuda_time_us": 2069.923, "pct_cuda_time": 3.097774580715579, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.134, "cuda_time_us": 33.151, "pct_cuda_time": 0.049612630578674745, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 33.151, "pct_cuda_time": 0.049612630578674745, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1804.677, "cuda_time_us": 475.193, "pct_cuda_time": 0.7111572731613582, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 153.915, "cuda_time_us": 209.85299999999998, "pct_cuda_time": 0.31405868193498326, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.117, "pct_cuda_time": 0.31295721000032356, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 489.406, "cuda_time_us": 40.319, "pct_cuda_time": 0.060340009420578176, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.319, "pct_cuda_time": 0.060340009420578176, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 786.038, "cuda_time_us": 73.11999999999999, "pct_cuda_time": 0.10942884220423807, "trace": "" }, "children": [ { 
"entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.192, "pct_cuda_time": 0.024232382562513994, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.616, "pct_cuda_time": 0.08323296619298284, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0019634934487412527, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 221.289, "cuda_time_us": 151.901, "pct_cuda_time": 0.22732973960155872, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0010999753695311132, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 151.166, "pct_cuda_time": 0.2262297642320276, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.944, "cuda_time_us": 31.263, "pct_cuda_time": 0.04678711561585197, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 31.263, "pct_cuda_time": 0.04678711561585197, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 482.363, "cuda_time_us": 1530.3159999999998, "pct_cuda_time": 2.2902175613596936, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 171.315, "cuda_time_us": 957.524, "pct_cuda_time": 1.4329970282107616, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 956.756, "pct_cuda_time": 1.4318476661919861, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.761, "cuda_time_us": 131.422, "pct_cuda_time": 0.19668158233267752, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.422, "pct_cuda_time": 0.19668158233267752, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 157.467, "cuda_time_us": 441.37, "pct_cuda_time": 0.6605389508162551, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 440.634, "pct_cuda_time": 0.6594374788815953, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2540.129, "cuda_time_us": 
2069.093, "pct_cuda_time": 3.0965324316588294, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.262, "cuda_time_us": 32.928, "pct_cuda_time": 0.04927889655499387, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.928, "pct_cuda_time": 0.04927889655499387, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1819.521, "cuda_time_us": 477.146, "pct_cuda_time": 0.7140800648575409, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.807, "cuda_time_us": 210.27, "pct_cuda_time": 0.31468274959361525, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.502, "pct_cuda_time": 0.3135333875748399, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 560.75, "cuda_time_us": 40.639, "pct_cuda_time": 0.06081891026173458, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.639, "pct_cuda_time": 0.06081891026173458, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 738.995, "cuda_time_us": 74.366, "pct_cuda_time": 0.11129356235449084, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.224, "pct_cuda_time": 0.024280272646629634, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.639, "pct_cuda_time": 0.08476395231955473, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.503, "pct_cuda_time": 0.0022493373883064804, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 199.662, "cuda_time_us": 151.87099999999998, "pct_cuda_time": 0.22728484264770027, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 151.135, "pct_cuda_time": 0.22618337071304054, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.132, "cuda_time_us": 31.744, "pct_cuda_time": 0.04750696344271518, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 31.744, "pct_cuda_time": 0.04750696344271518, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 468.315, "cuda_time_us": 1527.275, "pct_cuda_time": 2.2856665068035795, "trace": "" }, "children": [ { "entry": 
{ "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.817, "cuda_time_us": 956.083, "pct_cuda_time": 1.4308404778604291, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 955.347, "pct_cuda_time": 1.4297390059257693, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.357, "cuda_time_us": 131.934, "pct_cuda_time": 0.19744782367852776, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.934, "pct_cuda_time": 0.19744782367852776, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 151.599, "cuda_time_us": 439.258, "pct_cuda_time": 0.6573782052646228, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 438.522, "pct_cuda_time": 0.6562767333299631, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2427.58, "cuda_time_us": 2067.908, "pct_cuda_time": 3.094759001981422, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.643, "cuda_time_us": 32.8, "pct_cuda_time": 0.049087336218531306, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.8, "pct_cuda_time": 0.049087336218531306, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1745.797, "cuda_time_us": 475.64199999999994, "pct_cuda_time": 0.7118292309041058, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.586, "cuda_time_us": 209.85299999999998, "pct_cuda_time": 0.31405868193498326, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": 
"mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.117, "pct_cuda_time": 0.31295721000032356, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 477.632, "cuda_time_us": 40.511, "pct_cuda_time": 0.060627349925272016, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.511, "pct_cuda_time": 0.060627349925272016, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 740.879, "cuda_time_us": 73.63199999999999, "pct_cuda_time": 0.11019508355008834, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 15.936, "pct_cuda_time": 0.02384926188958887, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.224, "pct_cuda_time": 0.08414287779118002, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { 
"name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.002202943869319454, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 190.22, "cuda_time_us": 151.646, "pct_cuda_time": 0.22694811549376215, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.91, "pct_cuda_time": 0.22584664355910244, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.119, "cuda_time_us": 31.36, "pct_cuda_time": 0.0469322824333275, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 31.36, "pct_cuda_time": 0.0469322824333275, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 462.532, "cuda_time_us": 1528.106, "pct_cuda_time": 2.286910152425458, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.387, "cuda_time_us": 954.995, "pct_cuda_time": 1.4292122150004973, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 954.227, "pct_cuda_time": 1.4280628529817219, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.813, "cuda_time_us": 131.966, "pct_cuda_time": 0.1974957137626434, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.966, "pct_cuda_time": 0.1974957137626434, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 159.13, "cuda_time_us": 441.14500000000004, "pct_cuda_time": 0.660202223662317, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0010999753695311132, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 440.41, "pct_cuda_time": 0.6591022482927859, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2515.035, "cuda_time_us": 2072.453, "pct_cuda_time": 3.1015608904909717, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.358, "cuda_time_us": 32.576, "pct_cuda_time": 0.048752105629721834, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.576, "pct_cuda_time": 0.048752105629721834, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1796.55, "cuda_time_us": 475.994, "pct_cuda_time": 0.7123560218293779, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 153.311, "cuda_time_us": 210.461, "pct_cuda_time": 0.31496859353318046, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.693, "pct_cuda_time": 0.3138192315144051, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 495.87, "cuda_time_us": 40.192, "pct_cuda_time": 0.06014994564924423, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.192, "pct_cuda_time": 0.06014994564924423, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" 
}, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 748.813, "cuda_time_us": 73.40599999999999, "pct_cuda_time": 0.10985685983102161, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.095, "pct_cuda_time": 0.02408721574503846, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.999, "pct_cuda_time": 0.08380615063724192, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0019634934487412527, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 250.364, "cuda_time_us": 151.935, "pct_cuda_time": 0.22738062281593158, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0011029684997883409, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 
4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 151.198, "pct_cuda_time": 0.22627765431614322, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.233, "cuda_time_us": 31.2, "pct_cuda_time": 0.0466928320127493, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 31.2, "pct_cuda_time": 0.0466928320127493, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 488.006, "cuda_time_us": 1532.683, "pct_cuda_time": 2.293759931019123, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.228, "cuda_time_us": 958.739, "pct_cuda_time": 1.4348153548420273, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 958.003, "pct_cuda_time": 1.4337138829073675, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.373, "cuda_time_us": 131.486, "pct_cuda_time": 0.19677736250090877, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.486, "pct_cuda_time": 0.19677736250090877, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 158.231, "cuda_time_us": 442.45799999999997, "pct_cuda_time": 0.6621672136761868, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 441.722, "pct_cuda_time": 0.6610657417415271, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 
14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2426.288, "cuda_time_us": 2067.5879999999997, "pct_cuda_time": 3.0942801011402654, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.07, "cuda_time_us": 32.544, "pct_cuda_time": 0.04870421554560619, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.544, "pct_cuda_time": 0.04870421554560619, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1728.691, "cuda_time_us": 474.96999999999997, "pct_cuda_time": 0.7108235391376773, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 160.392, "cuda_time_us": 209.501, "pct_cuda_time": 0.31353189100971124, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 208.733, "pct_cuda_time": 0.3123825289909359, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 486.102, "cuda_time_us": 40.575, "pct_cuda_time": 0.0607231300935033, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.575, "pct_cuda_time": 0.0607231300935033, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 737.911, "cuda_time_us": 73.28, "pct_cuda_time": 0.10966829262481631, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.672, "pct_cuda_time": 0.0249507338242486, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, 
flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.328, "pct_cuda_time": 0.0828019554359421, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.0019156033646256121, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 195.924, "cuda_time_us": 151.61399999999998, "pct_cuda_time": 0.22690022540964652, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.878, "pct_cuda_time": 0.2257987534749868, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.312, "cuda_time_us": 31.327, "pct_cuda_time": 0.04688289578408325, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 31.327, "pct_cuda_time": 0.04688289578408325, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] 
}, { "entry": { "name": "LlamaMLP", "cpu_time_us": 471.242, "cuda_time_us": 1528.7469999999998, "pct_cuda_time": 2.287869450672899, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 161.356, "cuda_time_us": 956.3389999999999, "pct_cuda_time": 1.4312235985333541, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 955.603, "pct_cuda_time": 1.4301221265986943, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.492, "cuda_time_us": 132.03, "pct_cuda_time": 0.19759149393087466, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 132.03, "pct_cuda_time": 0.19759149393087466, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 163.321, "cuda_time_us": 440.378, "pct_cuda_time": 0.6590543582086702, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.642, "pct_cuda_time": 0.6579528862740105, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2430.033, "cuda_time_us": 2068.131, "pct_cuda_time": 3.095092736005103, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.845, "cuda_time_us": 32.736, "pct_cuda_time": 0.04899155605030003, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.736, "pct_cuda_time": 0.04899155605030003, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1723.108, "cuda_time_us": 476.825, "pct_cuda_time": 0.7135996674512559, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 149.543, "cuda_time_us": 210.109, "pct_cuda_time": 0.31444180260790844, "trace": "" 
}, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.341, "pct_cuda_time": 0.3132924405891331, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 482.436, "cuda_time_us": 40.767, "pct_cuda_time": 0.06101047059819714, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.767, "pct_cuda_time": 0.06101047059819714, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 732.941, "cuda_time_us": 73.759, "pct_cuda_time": 0.1103851473214223, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.224, "pct_cuda_time": 0.024280272646629634, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.031, "pct_cuda_time": 0.08385404072135756, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0022508339534350946, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 211.865, "cuda_time_us": 152.19, "pct_cuda_time": 0.22776224692372804, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 151.422, "pct_cuda_time": 0.22661288490495268, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.882, "cuda_time_us": 30.847, "pct_cuda_time": 0.046164544522348645, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.847, "pct_cuda_time": 0.046164544522348645, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 473.485, "cuda_time_us": 1527.723, "pct_cuda_time": 2.2863369679811987, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.519, "cuda_time_us": 957.011, "pct_cuda_time": 1.4322292902997826, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 956.275, "pct_cuda_time": 1.4311278183651228, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": 
"SiluAndMul", "cpu_time_us": 100.642, "cuda_time_us": 131.358, "pct_cuda_time": 0.19658580216444624, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.358, "pct_cuda_time": 0.19658580216444624, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 155.605, "cuda_time_us": 439.354, "pct_cuda_time": 0.6575218755169696, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 438.618, "pct_cuda_time": 0.65642040358231, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2464.363, "cuda_time_us": 2072.4869999999996, "pct_cuda_time": 3.1016117737053444, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.364, "cuda_time_us": 32.864, "pct_cuda_time": 0.04918311638676259, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.864, "pct_cuda_time": 0.04918311638676259, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1760.907, "cuda_time_us": 476.66499999999996, "pct_cuda_time": 0.7133602170306776, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 158.101, "cuda_time_us": 211.069, "pct_cuda_time": 0.3158785051313776, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 210.301, "pct_cuda_time": 0.31472914311260225, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 530.616, "cuda_time_us": 40.575, "pct_cuda_time": 0.0607231300935033, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 
40.575, "pct_cuda_time": 0.0607231300935033, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 714.53, "cuda_time_us": 73.63000000000001, "pct_cuda_time": 0.11019209041983112, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.256, "pct_cuda_time": 0.024328162730745277, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.871, "pct_cuda_time": 0.08361459030077936, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.503, "pct_cuda_time": 0.0022493373883064804, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 193.855, "cuda_time_us": 151.391, "pct_cuda_time": 0.22656649138596566, "trace": "" }, "children": [ { 
"entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0011029684997883409, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.654, "pct_cuda_time": 0.22546352288617733, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.02, "cuda_time_us": 30.912, "pct_cuda_time": 0.04626182125570853, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.912, "pct_cuda_time": 0.04626182125570853, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 479.275, "cuda_time_us": 1532.0459999999998, "pct_cuda_time": 2.292806619032196, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.391, "cuda_time_us": 959.2529999999999, "pct_cuda_time": 1.4355845893181345, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0011029684997883409, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 958.516, "pct_cuda_time": 1.4344816208183464, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.641, "cuda_time_us": 131.519, "pct_cuda_time": 0.19682674915015305, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.519, "pct_cuda_time": 0.19682674915015305, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.492, "cuda_time_us": 441.274, "pct_cuda_time": 0.6603952805639082, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 
440.538, "pct_cuda_time": 0.6592938086292484, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2407.236, "cuda_time_us": 2068.2619999999997, "pct_cuda_time": 3.095288786036951, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.718, "cuda_time_us": 32.384, "pct_cuda_time": 0.04846476512502799, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.384, "pct_cuda_time": 0.04846476512502799, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1738.319, "cuda_time_us": 476.025, "pct_cuda_time": 0.7124024153483649, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.847, "cuda_time_us": 209.661, "pct_cuda_time": 0.31377134143028945, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 208.925, "pct_cuda_time": 0.3126698694956297, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 487.839, "cuda_time_us": 41.088, "pct_cuda_time": 0.06149086800448215, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 41.088, "pct_cuda_time": 0.06149086800448215, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 714.92, "cuda_time_us": 73.75899999999999, "pct_cuda_time": 0.11038514732142227, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.064, "pct_cuda_time": 0.024040822226051434, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, 
cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.351, "pct_cuda_time": 0.08433294156251396, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.002011383532856893, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 187.725, "cuda_time_us": 151.51700000000002, "pct_cuda_time": 0.22675505859217104, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0010999753695311132, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.782, "pct_cuda_time": 0.22565508322263994, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.699, "cuda_time_us": 30.88, "pct_cuda_time": 0.04621393117159289, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, 
"cuda_time_us": 30.88, "pct_cuda_time": 0.04621393117159289, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 453.435, "cuda_time_us": 1528.973, "pct_cuda_time": 2.2882076743919657, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 155.975, "cuda_time_us": 956.98, "pct_cuda_time": 1.4321828967807957, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 956.244, "pct_cuda_time": 1.4310814248461359, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.751, "cuda_time_us": 131.742, "pct_cuda_time": 0.1971604831738339, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.742, "pct_cuda_time": 0.1971604831738339, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 151.631, "cuda_time_us": 440.251, "pct_cuda_time": 0.6588642944373362, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.515, "pct_cuda_time": 0.6577628225026765, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2547.896, "cuda_time_us": 2066.691, "pct_cuda_time": 3.0929376822198993, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.042, "cuda_time_us": 32.575, "pct_cuda_time": 0.04875060906459323, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.575, "pct_cuda_time": 0.04875060906459323, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1852.305, "cuda_time_us": 475.385, "pct_cuda_time": 0.711444613666052, "trace": "" }, "children": [ { 
"entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.744, "cuda_time_us": 209.50099999999998, "pct_cuda_time": 0.3135318910097112, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 208.765, "pct_cuda_time": 0.3124304190750515, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 499.642, "cuda_time_us": 40.255, "pct_cuda_time": 0.06024422925234689, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.255, "pct_cuda_time": 0.06024422925234689, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 835.483, "cuda_time_us": 73.759, "pct_cuda_time": 0.1103851473214223, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.128, "pct_cuda_time": 0.024136602394282714, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.159, "pct_cuda_time": 0.08404560105782012, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, 
None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.002202943869319454, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 211.666, "cuda_time_us": 151.87, "pct_cuda_time": 0.22728334608257167, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 151.102, "pct_cuda_time": 0.2261339840637963, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.278, "cuda_time_us": 30.72, "pct_cuda_time": 0.04597448075101469, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.72, "pct_cuda_time": 0.04597448075101469, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 475.715, "cuda_time_us": 1528.011, "pct_cuda_time": 2.2867679787382396, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.528, "cuda_time_us": 956.723, "pct_cuda_time": 1.4317982795427417, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 955.987, "pct_cuda_time": 1.4306968076080822, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], 
bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.283, "cuda_time_us": 131.55, "pct_cuda_time": 0.1968731426691401, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.55, "pct_cuda_time": 0.1968731426691401, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 157.756, "cuda_time_us": 439.738, "pct_cuda_time": 0.6580965565263575, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.002, "pct_cuda_time": 0.6569950845916976, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2357.085, "cuda_time_us": 2068.899, "pct_cuda_time": 3.0962420980238785, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.973, "cuda_time_us": 32.192, "pct_cuda_time": 0.04817742462033415, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.192, "pct_cuda_time": 0.04817742462033415, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1691.596, "cuda_time_us": 475.769, "pct_cuda_time": 0.7120192946754398, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 151.31, "cuda_time_us": 209.949, "pct_cuda_time": 0.3142023521873302, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.181, "pct_cuda_time": 0.31305299016855487, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 488.363, "cuda_time_us": 40.223, "pct_cuda_time": 0.06019633916823125, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, 
c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.223, "pct_cuda_time": 0.06019633916823125, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 719.778, "cuda_time_us": 74.111, "pct_cuda_time": 0.11091193824669433, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.224, "pct_cuda_time": 0.024280272646629634, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.383, "pct_cuda_time": 0.08438083164662961, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0022508339534350946, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 187.883, 
"cuda_time_us": 151.486, "pct_cuda_time": 0.22670866507318396, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.75, "pct_cuda_time": 0.22560719313852426, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.868, "cuda_time_us": 30.751, "pct_cuda_time": 0.046020874270001726, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.751, "pct_cuda_time": 0.046020874270001726, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 447.692, "cuda_time_us": 1530.187, "pct_cuda_time": 2.2900245044581027, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 155.768, "cuda_time_us": 959.251, "pct_cuda_time": 1.4355815961878773, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 958.515, "pct_cuda_time": 1.4344801242532177, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.392, "cuda_time_us": 131.454, "pct_cuda_time": 0.19672947241679314, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.454, "pct_cuda_time": 0.19672947241679314, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 147.925, "cuda_time_us": 439.48199999999997, "pct_cuda_time": 0.6577134358534322, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 438.714, "pct_cuda_time": 0.6565640738346569, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2409.957, "cuda_time_us": 2070.8509999999997, "pct_cuda_time": 3.099163393154932, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.224, "cuda_time_us": 33.951, "pct_cuda_time": 0.05080988268156575, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 33.951, "pct_cuda_time": 0.05080988268156575, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1716.139, "cuda_time_us": 476.057, "pct_cuda_time": 0.7124503054324806, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 174.122, "cuda_time_us": 209.564, "pct_cuda_time": 0.3136261746128139, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 208.828, "pct_cuda_time": 0.3125247026781542, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 462.663, "cuda_time_us": 40.128, "pct_cuda_time": 0.060054165481012944, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.128, "pct_cuda_time": 0.060054165481012944, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 725.479, "cuda_time_us": 74.335, "pct_cuda_time": 0.1112471688355038, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.032, "pct_cuda_time": 0.02399293214193579, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 
128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.831, "pct_cuda_time": 0.08505129282424857, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.002202943869319454, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 205.735, "cuda_time_us": 152.03, "pct_cuda_time": 0.22752279650314985, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 151.294, "pct_cuda_time": 0.22642132456849015, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.43, "cuda_time_us": 30.944, "pct_cuda_time": 0.04630971133982417, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, 
void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.944, "pct_cuda_time": 0.04630971133982417, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 467.696, "cuda_time_us": 1529.899, "pct_cuda_time": 2.289593493701062, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.004, "cuda_time_us": 958.067, "pct_cuda_time": 1.4338096630755988, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 957.331, "pct_cuda_time": 1.4327081911409392, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.596, "cuda_time_us": 131.55, "pct_cuda_time": 0.1968731426691401, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.55, "pct_cuda_time": 0.1968731426691401, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 153.826, "cuda_time_us": 440.282, "pct_cuda_time": 0.6589106879563233, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.546, "pct_cuda_time": 0.6578092160216635, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2427.016, "cuda_time_us": 2069.06, "pct_cuda_time": 3.096483045009585, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.931, "cuda_time_us": 33.056, "pct_cuda_time": 0.04947045689145643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 33.056, "pct_cuda_time": 0.04947045689145643, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": 
"LlamaAttention", "cpu_time_us": 1733.511, "cuda_time_us": 474.586, "pct_cuda_time": 0.7102488581282898, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.654, "cuda_time_us": 209.69299999999998, "pct_cuda_time": 0.31381923151440505, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 208.957, "pct_cuda_time": 0.31271775957974535, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 515.737, "cuda_time_us": 40.383, "pct_cuda_time": 0.060435789588809456, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.383, "pct_cuda_time": 0.060435789588809456, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 701.518, "cuda_time_us": 73.18299999999999, "pct_cuda_time": 0.10952312580734076, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 15.968, "pct_cuda_time": 0.023897151973704515, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.903, "pct_cuda_time": 0.083662480384895, "trace": 
"_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0019634934487412527, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 213.178, "cuda_time_us": 151.327, "pct_cuda_time": 0.22647071121773438, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0011029684997883409, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.59, "pct_cuda_time": 0.22536774271794605, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.643, "cuda_time_us": 31.871, "pct_cuda_time": 0.04769702721404913, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 31.871, "pct_cuda_time": 0.04769702721404913, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 464.59, "cuda_time_us": 1529.547, "pct_cuda_time": 2.28906670277579, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.928, "cuda_time_us": 957.491, "pct_cuda_time": 1.4329476415615172, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, 
"cuda_time_us": 956.755, "pct_cuda_time": 1.4318461696268576, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.873, "cuda_time_us": 131.518, "pct_cuda_time": 0.19682525258502442, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.518, "pct_cuda_time": 0.19682525258502442, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 153.256, "cuda_time_us": 440.53799999999995, "pct_cuda_time": 0.6592938086292484, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.77, "pct_cuda_time": 0.658144446610473, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2351.505, "cuda_time_us": 2073.891, "pct_cuda_time": 3.1037129511459183, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.626, "cuda_time_us": 33.664, "pct_cuda_time": 0.050380368489653604, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 33.664, "pct_cuda_time": 0.050380368489653604, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1664.769, "cuda_time_us": 475.32099999999997, "pct_cuda_time": 0.7113488334978207, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 153.175, "cuda_time_us": 210.045, "pct_cuda_time": 0.3143460224396771, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.309, "pct_cuda_time": 0.31324455050501737, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 
474.527, "cuda_time_us": 40.223, "pct_cuda_time": 0.06019633916823125, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.223, "pct_cuda_time": 0.06019633916823125, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 709.827, "cuda_time_us": 73.567, "pct_cuda_time": 0.11009780681672844, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.0, "pct_cuda_time": 0.02394504205782015, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.255, "pct_cuda_time": 0.08418927131016705, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0019634934487412527, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 
128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 184.422, "cuda_time_us": 151.486, "pct_cuda_time": 0.22670866507318396, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.75, "pct_cuda_time": 0.22560719313852426, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.07, "cuda_time_us": 30.847, "pct_cuda_time": 0.046164544522348645, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.847, "pct_cuda_time": 0.046164544522348645, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 458.773, "cuda_time_us": 1534.059, "pct_cuda_time": 2.295819204636095, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.81, "cuda_time_us": 961.427, "pct_cuda_time": 1.438838121907741, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 960.659, "pct_cuda_time": 1.4376887598889656, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.153, "cuda_time_us": 131.806, "pct_cuda_time": 0.1972562633420652, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.806, "pct_cuda_time": 0.1972562633420652, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 151.923, "cuda_time_us": 440.82599999999996, "pct_cuda_time": 0.6597248193862891, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- 
linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 440.09, "pct_cuda_time": 0.6586233474516294, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2439.666, "cuda_time_us": 2068.4539999999997, "pct_cuda_time": 3.095576126541645, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.419, "cuda_time_us": 33.12, "pct_cuda_time": 0.04956623705968771, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 33.12, "pct_cuda_time": 0.04956623705968771, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1724.515, "cuda_time_us": 474.84299999999996, "pct_cuda_time": 0.7106334753663434, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 148.995, "cuda_time_us": 209.534, "pct_cuda_time": 0.3135812776589555, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 208.798, "pct_cuda_time": 0.31247980572429573, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 471.188, "cuda_time_us": 40.319, "pct_cuda_time": 0.060340009420578176, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.319, "pct_cuda_time": 0.060340009420578176, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 743.292, "cuda_time_us": 73.183, "pct_cuda_time": 0.10952312580734078, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.064, "pct_cuda_time": 0.024040822226051434, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], 
None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.487, "pct_cuda_time": 0.08303990929139168, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.632, "pct_cuda_time": 0.0024423942898976554, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 214.126, "cuda_time_us": 151.807, "pct_cuda_time": 0.227189062479469, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0011029684997883409, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 151.07, "pct_cuda_time": 0.22608609397968063, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.95, "cuda_time_us": 30.72, "pct_cuda_time": 0.04597448075101469, 
"trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.72, "pct_cuda_time": 0.04597448075101469, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 480.278, "cuda_time_us": 1529.771, "pct_cuda_time": 2.2894019333645996, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 168.151, "cuda_time_us": 958.259, "pct_cuda_time": 1.4340970035802927, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 957.523, "pct_cuda_time": 1.432995531645633, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.455, "cuda_time_us": 131.454, "pct_cuda_time": 0.19672947241679314, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.454, "pct_cuda_time": 0.19672947241679314, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 157.015, "cuda_time_us": 440.058, "pct_cuda_time": 0.6585754573675138, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.322, "pct_cuda_time": 0.6574739854328541, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2396.779, "cuda_time_us": 2070.244, "pct_cuda_time": 3.0982549781218642, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.529, "cuda_time_us": 32.8, "pct_cuda_time": 0.049087336218531306, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.8, "pct_cuda_time": 0.049087336218531306, "trace": 
"_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1706.2, "cuda_time_us": 477.24199999999996, "pct_cuda_time": 0.7142237351098878, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 157.676, "cuda_time_us": 210.91, "pct_cuda_time": 0.315640551275928, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 210.142, "pct_cuda_time": 0.31449118925715264, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 485.961, "cuda_time_us": 41.087, "pct_cuda_time": 0.06148937143935355, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 41.087, "pct_cuda_time": 0.06148937143935355, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 715.189, "cuda_time_us": 74.111, "pct_cuda_time": 0.11091193824669433, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.288, "pct_cuda_time": 0.024376052814860917, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, 
true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.319, "pct_cuda_time": 0.08428505147839832, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0022508339534350946, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 195.171, "cuda_time_us": 151.134, "pct_cuda_time": 0.2261818741479119, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.398, "pct_cuda_time": 0.22508040221325218, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.868, "cuda_time_us": 31.135, "pct_cuda_time": 0.046595555279389404, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 31.135, "pct_cuda_time": 0.046595555279389404, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 462.512, "cuda_time_us": 1529.067, "pct_cuda_time": 2.2883483515140557, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.538, "cuda_time_us": 956.787, "pct_cuda_time": 1.4318940597109733, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 956.051, "pct_cuda_time": 1.4307925877763135, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.854, "cuda_time_us": 131.55, "pct_cuda_time": 0.1968731426691401, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.55, "pct_cuda_time": 0.1968731426691401, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 147.96, "cuda_time_us": 440.73, "pct_cuda_time": 0.6595811491339423, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.994, "pct_cuda_time": 0.6584796771992826, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2487.746, "cuda_time_us": 2074.148, "pct_cuda_time": 3.104097568383972, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.048, "cuda_time_us": 33.28, "pct_cuda_time": 0.04980568748026592, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 33.28, "pct_cuda_time": 0.04980568748026592, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1779.334, "cuda_time_us": 475.51300000000003, "pct_cuda_time": 0.7116361740025147, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 154.644, "cuda_time_us": 210.20499999999998, "pct_cuda_time": 0.3145854728602553, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.469, "pct_cuda_time": 0.3134840009255956, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 
4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 467.222, "cuda_time_us": 40.096, "pct_cuda_time": 0.060006275396897304, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.096, "pct_cuda_time": 0.060006275396897304, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 795.194, "cuda_time_us": 73.247, "pct_cuda_time": 0.10961890597557206, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.0, "pct_cuda_time": 0.02394504205782015, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.967, "pct_cuda_time": 0.08375826055312628, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.0019156033646256121, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, 
True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 215.276, "cuda_time_us": 151.965, "pct_cuda_time": 0.22742551976979, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0010999753695311132, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 151.23, "pct_cuda_time": 0.22632554440025887, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.866, "cuda_time_us": 31.584, "pct_cuda_time": 0.047267513022136984, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 31.584, "pct_cuda_time": 0.047267513022136984, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 476.82, "cuda_time_us": 1533.771, "pct_cuda_time": 2.2953881938790546, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.664, "cuda_time_us": 961.107, "pct_cuda_time": 1.4383592210665848, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 960.371, "pct_cuda_time": 1.437257749131925, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.542, "cuda_time_us": 131.422, "pct_cuda_time": 0.19668158233267752, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.422, "pct_cuda_time": 0.19668158233267752, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 155.377, "cuda_time_us": 441.24199999999996, "pct_cuda_time": 0.6603473904797924, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 
0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 440.506, "pct_cuda_time": 0.6592459185451327, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2488.81, "cuda_time_us": 2066.404, "pct_cuda_time": 3.092508168027987, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.598, "cuda_time_us": 32.703, "pct_cuda_time": 0.048942169401055786, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.703, "pct_cuda_time": 0.048942169401055786, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1798.055, "cuda_time_us": 476.442, "pct_cuda_time": 0.7130264830069968, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.883, "cuda_time_us": 210.493, "pct_cuda_time": 0.31501648361729606, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.757, "pct_cuda_time": 0.31391501168263636, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 522.225, "cuda_time_us": 41.375, "pct_cuda_time": 0.0619203821963943, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 41.375, "pct_cuda_time": 0.0619203821963943, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 757.309, "cuda_time_us": 73.08699999999999, "pct_cuda_time": 0.10937945555499383, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.16, "pct_cuda_time": 0.024184492478398358, "trace": 
"_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.583, "pct_cuda_time": 0.08318357954373859, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.002011383532856893, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 209.219, "cuda_time_us": 151.487, "pct_cuda_time": 0.22671016163831262, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0011029684997883409, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.75, "pct_cuda_time": 0.22560719313852426, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, 
"children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.443, "cuda_time_us": 30.944, "pct_cuda_time": 0.04630971133982417, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.944, "pct_cuda_time": 0.04630971133982417, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 461.971, "cuda_time_us": 1526.315, "pct_cuda_time": 2.2842298042801104, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.547, "cuda_time_us": 955.411, "pct_cuda_time": 1.4298347860940006, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 954.675, "pct_cuda_time": 1.4287333141593408, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.272, "cuda_time_us": 130.974, "pct_cuda_time": 0.19601112115505853, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 130.974, "pct_cuda_time": 0.19601112115505853, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 155.051, "cuda_time_us": 439.93, "pct_cuda_time": 0.6583838970310513, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.194, "pct_cuda_time": 0.6572824250963916, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2344.98, "cuda_time_us": 2065.636, "pct_cuda_time": 3.091358806009212, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.001, "cuda_time_us": 32.352, "pct_cuda_time": 0.04841687504091235, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, 
c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.352, "pct_cuda_time": 0.04841687504091235, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1678.804, "cuda_time_us": 474.07399999999996, "pct_cuda_time": 0.7094826167824394, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 158.868, "cuda_time_us": 209.661, "pct_cuda_time": 0.31377134143028945, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 208.893, "pct_cuda_time": 0.3126219794115141, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 482.907, "cuda_time_us": 40.64, "pct_cuda_time": 0.06082040682686318, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.64, "pct_cuda_time": 0.06082040682686318, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 708.3, "cuda_time_us": 72.991, "pct_cuda_time": 0.10923578530264694, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.192, "pct_cuda_time": 0.024232382562513994, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, 
cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.327, "pct_cuda_time": 0.08280045887081347, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.002202943869319454, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.835, "cuda_time_us": 150.78199999999998, "pct_cuda_time": 0.22565508322263989, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.046, "pct_cuda_time": 0.22455361128798015, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.234, "cuda_time_us": 31.103, "pct_cuda_time": 0.046547665195273764, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 31.103, "pct_cuda_time": 0.046547665195273764, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 444.156, "cuda_time_us": 1528.107, "pct_cuda_time": 2.286911648990586, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.45, "cuda_time_us": 956.915, "pct_cuda_time": 1.4320856200474357, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 
4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 956.179, "pct_cuda_time": 1.4309841481127759, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 92.863, "cuda_time_us": 131.294, "pct_cuda_time": 0.19649002199621496, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.294, "pct_cuda_time": 0.19649002199621496, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.532, "cuda_time_us": 439.89799999999997, "pct_cuda_time": 0.6583360069469355, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.13, "pct_cuda_time": 0.6571866449281603, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2494.754, "cuda_time_us": 2066.7569999999996, "pct_cuda_time": 3.093036455518387, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.744, "cuda_time_us": 32.512, "pct_cuda_time": 0.048656325461490554, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.512, "pct_cuda_time": 0.048656325461490554, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1784.061, "cuda_time_us": 473.178, "pct_cuda_time": 0.7081416944272014, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 148.852, "cuda_time_us": 209.981, "pct_cuda_time": 0.3142502422714458, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.213, "pct_cuda_time": 
0.31310088025267047, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 517.777, "cuda_time_us": 40.351, "pct_cuda_time": 0.060387899504693816, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.351, "pct_cuda_time": 0.060387899504693816, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 737.743, "cuda_time_us": 72.54299999999999, "pct_cuda_time": 0.10856532412502795, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.192, "pct_cuda_time": 0.024232382562513994, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 54.879, "pct_cuda_time": 0.08212999769319451, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.002202943869319454, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], 
bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 225.714, "cuda_time_us": 150.303, "pct_cuda_time": 0.2249382285260339, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.567, "pct_cuda_time": 0.2238367565913742, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.8, "cuda_time_us": 30.463, "pct_cuda_time": 0.04558986351296096, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.463, "pct_cuda_time": 0.04558986351296096, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 476.366, "cuda_time_us": 1530.6039999999998, "pct_cuda_time": 2.2906485721167344, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.63, "cuda_time_us": 959.251, "pct_cuda_time": 1.4355815961878773, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 958.515, "pct_cuda_time": 1.4344801242532177, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.726, "cuda_time_us": 131.743, "pct_cuda_time": 0.19716197973896252, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.743, "pct_cuda_time": 0.19716197973896252, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.239, "cuda_time_us": 439.61, 
"pct_cuda_time": 0.6579049961898948, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0011029684997883409, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 438.873, "pct_cuda_time": 0.6568020276901064, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2430.102, "cuda_time_us": 2064.5159999999996, "pct_cuda_time": 3.089682653065164, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.602, "cuda_time_us": 32.8, "pct_cuda_time": 0.049087336218531306, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.8, "pct_cuda_time": 0.049087336218531306, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1716.123, "cuda_time_us": 475.065, "pct_cuda_time": 0.7109657128248956, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 159.359, "cuda_time_us": 209.885, "pct_cuda_time": 0.3141065720190989, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.149, "pct_cuda_time": 0.31300510008443916, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 480.901, "cuda_time_us": 40.608, "pct_cuda_time": 0.06077251674274754, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.608, "pct_cuda_time": 0.06077251674274754, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 737.111, "cuda_time_us": 73.31, "pct_cuda_time": 0.10971318957867472, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, 
int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 15.967, "pct_cuda_time": 0.023895655408575897, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.063, "pct_cuda_time": 0.08390193080547322, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.0019156033646256121, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 192.747, "cuda_time_us": 151.262, "pct_cuda_time": 0.2263734344843745, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.494, "pct_cuda_time": 0.22522407246559914, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 
4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 95.689, "cuda_time_us": 30.464, "pct_cuda_time": 0.04559136007808957, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.464, "pct_cuda_time": 0.04559136007808957, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 466.362, "cuda_time_us": 1526.187, "pct_cuda_time": 2.2840382439436477, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 161.986, "cuda_time_us": 955.347, "pct_cuda_time": 1.4297390059257693, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 954.611, "pct_cuda_time": 1.4286375339911097, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.797, "cuda_time_us": 131.614, "pct_cuda_time": 0.19696892283737139, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.614, "pct_cuda_time": 0.19696892283737139, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 156.164, "cuda_time_us": 439.226, "pct_cuda_time": 0.6573303151805072, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 438.49, "pct_cuda_time": 0.6562288432458475, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2484.31, "cuda_time_us": 2065.6369999999997, "pct_cuda_time": 3.09136030257434, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.786, "cuda_time_us": 32.832, "pct_cuda_time": 0.04913522630264695, "trace": "" }, "children": [ { "entry": { 
"name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.832, "pct_cuda_time": 0.04913522630264695, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1759.334, "cuda_time_us": 474.52, "pct_cuda_time": 0.7101500848298012, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 154.336, "cuda_time_us": 209.437, "pct_cuda_time": 0.31343611084148, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 208.669, "pct_cuda_time": 0.31228674882270463, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 490.574, "cuda_time_us": 40.799, "pct_cuda_time": 0.06105836068231277, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.799, "pct_cuda_time": 0.06105836068231277, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 748.295, "cuda_time_us": 72.734, "pct_cuda_time": 0.10885116806459319, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 15.871, "pct_cuda_time": 0.023751985156228977, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, 
true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.455, "pct_cuda_time": 0.08299201920727603, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.408, "pct_cuda_time": 0.002107163701088173, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 214.912, "cuda_time_us": 151.54999999999998, "pct_cuda_time": 0.22680444524141524, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.814, "pct_cuda_time": 0.22570297330675554, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.17, "cuda_time_us": 30.784, "pct_cuda_time": 0.04607026091924597, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.784, "pct_cuda_time": 0.04607026091924597, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 479.901, "cuda_time_us": 1527.501, "pct_cuda_time": 2.2860047305226465, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.416, "cuda_time_us": 955.732, "pct_cuda_time": 1.4303151835002856, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, 
"pct_cuda_time": 0.0011029684997883409, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 954.995, "pct_cuda_time": 1.4292122150004973, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.474, "cuda_time_us": 131.454, "pct_cuda_time": 0.19672947241679314, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.454, "pct_cuda_time": 0.19672947241679314, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 160.752, "cuda_time_us": 440.315, "pct_cuda_time": 0.6589600746055676, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.547, "pct_cuda_time": 0.6578107125867921, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2416.052, "cuda_time_us": 2068.131, "pct_cuda_time": 3.095092736005103, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.37, "cuda_time_us": 32.319, "pct_cuda_time": 0.048367488391668094, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.319, "pct_cuda_time": 0.048367488391668094, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1723.126, "cuda_time_us": 475.193, "pct_cuda_time": 0.7111572731613582, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 154.367, "cuda_time_us": 210.46099999999998, "pct_cuda_time": 0.3149685935331804, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.725, "pct_cuda_time": 0.3138671215985207, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 477.13, "cuda_time_us": 40.959, "pct_cuda_time": 0.06129781110289098, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.959, "pct_cuda_time": 0.06129781110289098, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 742.899, "cuda_time_us": 72.383, "pct_cuda_time": 0.10832587370444977, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.064, "pct_cuda_time": 0.024040822226051434, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 54.815, "pct_cuda_time": 0.08203421752496322, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 
0.0022508339534350946, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 200.799, "cuda_time_us": 151.39, "pct_cuda_time": 0.22656499482083706, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.654, "pct_cuda_time": 0.22546352288617733, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.138, "cuda_time_us": 30.944, "pct_cuda_time": 0.04630971133982417, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.944, "pct_cuda_time": 0.04630971133982417, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 461.076, "cuda_time_us": 1529.675, "pct_cuda_time": 2.2892582631122527, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.526, "cuda_time_us": 958.611, "pct_cuda_time": 1.4346237945055647, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 957.875, "pct_cuda_time": 1.433522322570905, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.731, "cuda_time_us": 131.198, "pct_cuda_time": 0.19634635174386803, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.198, "pct_cuda_time": 0.19634635174386803, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], 
bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 153.151, "cuda_time_us": 439.866, "pct_cuda_time": 0.65828811686282, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.13, "pct_cuda_time": 0.6571866449281603, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2580.16, "cuda_time_us": 2065.5389999999998, "pct_cuda_time": 3.0912136391917358, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.949, "cuda_time_us": 33.184, "pct_cuda_time": 0.049662017227919, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 33.184, "pct_cuda_time": 0.049662017227919, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1809.15, "cuda_time_us": 475.03200000000004, "pct_cuda_time": 0.7109163261756515, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 153.931, "cuda_time_us": 210.141, "pct_cuda_time": 0.31448969269202404, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.373, "pct_cuda_time": 0.3133403306732486, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 485.8, "cuda_time_us": 40.447, "pct_cuda_time": 0.06053156975704074, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.447, "pct_cuda_time": 0.06053156975704074, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 795.454, "cuda_time_us": 73.69500000000001, "pct_cuda_time": 0.11028936715319101, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.576, "pct_cuda_time": 0.02480706357190168, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.615, "pct_cuda_time": 0.08323146962785424, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0022508339534350946, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 220.498, "cuda_time_us": 150.74900000000002, "pct_cuda_time": 0.22560569657339566, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0010999753695311132, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.014, "pct_cuda_time": 0.22450572120386453, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.467, "cuda_time_us": 30.464, "pct_cuda_time": 0.04559136007808957, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.464, "pct_cuda_time": 0.04559136007808957, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 535.839, "cuda_time_us": 1526.859, "pct_cuda_time": 2.285043935710076, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.999, "cuda_time_us": 955.699, "pct_cuda_time": 1.4302657968510415, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 954.963, "pct_cuda_time": 1.4291643249163817, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.452, "cuda_time_us": 131.166, "pct_cuda_time": 0.1962984616597524, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.166, "pct_cuda_time": 0.1962984616597524, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 217.006, "cuda_time_us": 439.99399999999997, "pct_cuda_time": 0.6584796771992825, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.226, "pct_cuda_time": 0.6573303151805072, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2415.92, "cuda_time_us": 
2066.823, "pct_cuda_time": 3.0931352288168763, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.008, "cuda_time_us": 32.352, "pct_cuda_time": 0.04841687504091235, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.352, "pct_cuda_time": 0.04841687504091235, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1719.243, "cuda_time_us": 475.09799999999996, "pct_cuda_time": 0.7110150994741399, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 159.923, "cuda_time_us": 210.653, "pct_cuda_time": 0.3152559340378743, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.917, "pct_cuda_time": 0.31415446210321457, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 479.187, "cuda_time_us": 40.512, "pct_cuda_time": 0.06062884649040063, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.512, "pct_cuda_time": 0.06062884649040063, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 731.319, "cuda_time_us": 73.343, "pct_cuda_time": 0.10976257622791898, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.16, "pct_cuda_time": 0.024184492478398358, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.903, "pct_cuda_time": 0.083662480384895, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.0019156033646256121, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 194.399, "cuda_time_us": 150.59, "pct_cuda_time": 0.22536774271794605, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.854, "pct_cuda_time": 0.22426627078328634, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.547, "cuda_time_us": 31.392, "pct_cuda_time": 0.04698017251744314, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 31.392, "pct_cuda_time": 0.04698017251744314, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 465.767, "cuda_time_us": 1527.981, "pct_cuda_time": 2.286723081784381, "trace": "" }, "children": [ { "entry": { "name": 
"MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.98, "cuda_time_us": 956.1469999999999, "pct_cuda_time": 1.4309362580286602, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 955.411, "pct_cuda_time": 1.4298347860940006, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.018, "cuda_time_us": 131.646, "pct_cuda_time": 0.19701681292148698, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.646, "pct_cuda_time": 0.19701681292148698, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.915, "cuda_time_us": 440.18800000000005, "pct_cuda_time": 0.6587700108342337, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0011029684997883409, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.451, "pct_cuda_time": 0.6576670423344453, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2367.587, "cuda_time_us": 2064.997, "pct_cuda_time": 3.0904025008920275, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.846, "cuda_time_us": 33.471, "pct_cuda_time": 0.050091531419831144, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 33.471, "pct_cuda_time": 0.050091531419831144, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1690.774, "cuda_time_us": 473.08399999999995, "pct_cuda_time": 0.7080010173051117, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.459, "cuda_time_us": 209.34099999999998, "pct_cuda_time": 0.313292440589133, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": 
"mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 208.605, "pct_cuda_time": 0.31219096865447327, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 455.707, "cuda_time_us": 40.0, "pct_cuda_time": 0.05986260514455038, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.0, "pct_cuda_time": 0.05986260514455038, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 734.639, "cuda_time_us": 72.833, "pct_cuda_time": 0.10899932801232595, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 15.937, "pct_cuda_time": 0.023850758454717486, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.584, "pct_cuda_time": 0.08318507610886722, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void 
at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0019634934487412527, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 194.071, "cuda_time_us": 150.91, "pct_cuda_time": 0.22584664355910244, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.174, "pct_cuda_time": 0.2247451716244427, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.979, "cuda_time_us": 30.719, "pct_cuda_time": 0.04597298418588608, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.719, "pct_cuda_time": 0.04597298418588608, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 461.309, "cuda_time_us": 1527.723, "pct_cuda_time": 2.2863369679811987, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.971, "cuda_time_us": 957.011, "pct_cuda_time": 1.4322292902997826, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 956.275, "pct_cuda_time": 1.4311278183651228, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.746, "cuda_time_us": 131.102, "pct_cuda_time": 0.19620268149152112, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 
const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.102, "pct_cuda_time": 0.19620268149152112, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 152.084, "cuda_time_us": 439.61, "pct_cuda_time": 0.6579049961898948, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 438.874, "pct_cuda_time": 0.6568035242552351, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2448.846, "cuda_time_us": 2067.844, "pct_cuda_time": 3.094663221813191, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.518, "cuda_time_us": 33.056, "pct_cuda_time": 0.04947045689145643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 33.056, "pct_cuda_time": 0.04947045689145643, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1754.183, "cuda_time_us": 473.40099999999995, "pct_cuda_time": 0.7084754284508823, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.557, "cuda_time_us": 209.117, "pct_cuda_time": 0.31295721000032356, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 208.349, "pct_cuda_time": 0.3118078479815482, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 479.8, "cuda_time_us": 40.447, "pct_cuda_time": 0.06053156975704074, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.447, "pct_cuda_time": 0.06053156975704074, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { 
"name": "Attention", "cpu_time_us": 765.181, "cuda_time_us": 73.53500000000001, "pct_cuda_time": 0.11004991673261283, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.192, "pct_cuda_time": 0.024232382562513994, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.839, "pct_cuda_time": 0.08356670021666372, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.0022508339534350946, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 201.728, "cuda_time_us": 150.302, "pct_cuda_time": 0.22493673196090527, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], 
bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.566, "pct_cuda_time": 0.22383526002624554, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.392, "cuda_time_us": 30.879, "pct_cuda_time": 0.046212434606464285, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.879, "pct_cuda_time": 0.046212434606464285, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 465.8, "cuda_time_us": 1530.508, "pct_cuda_time": 2.290504901864388, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.088, "cuda_time_us": 958.676, "pct_cuda_time": 1.4347210712389247, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 957.94, "pct_cuda_time": 1.4336195993042649, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.069, "cuda_time_us": 131.838, "pct_cuda_time": 0.19730415342618085, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.838, "pct_cuda_time": 0.19730415342618085, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 153.687, "cuda_time_us": 439.99399999999997, "pct_cuda_time": 0.6584796771992825, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.258, "pct_cuda_time": 0.6573782052646228, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], 
None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2369.895, "cuda_time_us": 2067.301, "pct_cuda_time": 3.0938505869483537, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.567, "cuda_time_us": 32.735, "pct_cuda_time": 0.04899005948517141, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 32.735, "pct_cuda_time": 0.04899005948517141, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1689.769, "cuda_time_us": 474.00800000000004, "pct_cuda_time": 0.709383843483951, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.225, "cuda_time_us": 209.917, "pct_cuda_time": 0.31415446210321457, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0010999753695311132, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.182, "pct_cuda_time": 0.3130544867336834, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 484.534, "cuda_time_us": 40.415, "pct_cuda_time": 0.06048367967292509, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.415, "pct_cuda_time": 0.06048367967292509, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 721.372, "cuda_time_us": 73.247, "pct_cuda_time": 0.10961890597557206, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.032, "pct_cuda_time": 0.02399293214193579, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, 
cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.743, "pct_cuda_time": 0.0834230299643168, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.002202943869319454, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 188.706, "cuda_time_us": 150.429, "pct_cuda_time": 0.22512679573223923, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0010999753695311132, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.694, "pct_cuda_time": 0.2240268203627081, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.908, "cuda_time_us": 31.456, "pct_cuda_time": 0.047075952685674424, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 31.456, "pct_cuda_time": 0.047075952685674424, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 
456.765, "cuda_time_us": 1529.1019999999999, "pct_cuda_time": 2.2884007312935566, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.152, "cuda_time_us": 956.7239999999999, "pct_cuda_time": 1.4317997761078705, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0011029684997883409, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 955.987, "pct_cuda_time": 1.4306968076080822, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.009, "cuda_time_us": 131.806, "pct_cuda_time": 0.1972562633420652, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.806, "pct_cuda_time": 0.1972562633420652, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 148.657, "cuda_time_us": 440.572, "pct_cuda_time": 0.6593446918436213, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0011029684997883409, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.835, "pct_cuda_time": 0.6582417233438329, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2454.849, "cuda_time_us": 2066.3070000000002, "pct_cuda_time": 3.092363001210512, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.756, "cuda_time_us": 33.471, "pct_cuda_time": 0.050091531419831144, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 33.471, "pct_cuda_time": 0.050091531419831144, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1705.612, "cuda_time_us": 474.71400000000006, "pct_cuda_time": 0.7104404184647524, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 150.072, "cuda_time_us": 210.173, "pct_cuda_time": 0.3145375827761397, "trace": "" }, "children": [ { 
"entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 209.437, "pct_cuda_time": 0.31343611084148, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3072, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 467.533, "cuda_time_us": 40.352, "pct_cuda_time": 0.06038939606982242, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 40.352, "pct_cuda_time": 0.06038939606982242, "trace": "_C::rotary_embedding(int64[3072], bfloat16[3072, 4096], bfloat16[3072, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 727.563, "cuda_time_us": 72.767, "pct_cuda_time": 0.10890055471383744, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 16.127, "pct_cuda_time": 0.0241351058291541, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3072], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.232, "pct_cuda_time": 0.08265828518359516, "trace": "_vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], 
bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.408, "pct_cuda_time": 0.002107163701088173, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], None, None, bfloat16[3072, 32, 128], int32[7], int32[7], None, None, None, 512, 512, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3072, 32, 128], bfloat16[3072, 8, 128], bfloat16[3072, 8, 128], bfloat16[3072, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 213.551, "cuda_time_us": 151.422, "pct_cuda_time": 0.22661288490495268, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.654, "pct_cuda_time": 0.22546352288617733, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3072, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.293, "cuda_time_us": 30.303, "pct_cuda_time": 0.04535041309238276, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 30.303, "pct_cuda_time": 0.04535041309238276, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 513.623, "cuda_time_us": 1527.8190000000002, "pct_cuda_time": 2.2864806382335456, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 193.574, "cuda_time_us": 956.051, "pct_cuda_time": 1.4307925877763135, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 955.315, "pct_cuda_time": 1.4296911158416536, "trace": "mm(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3072, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3072, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.457, "cuda_time_us": 
131.55, "pct_cuda_time": 0.1968731426691401, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 131.55, "pct_cuda_time": 0.1968731426691401, "trace": "_C::silu_and_mul(bfloat16[3072, 14336], bfloat16[3072, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 158.943, "cuda_time_us": 440.218, "pct_cuda_time": 0.658814907788092, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.001101471934659727, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 439.482, "pct_cuda_time": 0.6577134358534323, "trace": "mm(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3072, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3072, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.818, "cuda_time_us": 33.44, "pct_cuda_time": 0.05004513790084411, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 33.44, "pct_cuda_time": 0.05004513790084411, "trace": "_C::fused_add_rms_norm(bfloat16[3072, 4096], bfloat16[3072, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 450.807, "cuda_time_us": 364.79499999999996, "pct_cuda_time": 0.5459394760926564, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 5.216, "pct_cuda_time": 0.00780608371084937, "trace": "index_select(bfloat16[3072, 4096], 0, int64[6])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[6, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 358.811, "pct_cuda_time": 0.5369840303630317, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[6, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 3163.229, "cuda_time_us": 124.861, "pct_cuda_time": 0.18686261852384264, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.003735426561019944, "trace": "copy_(bfloat16[6], bfloat16[6], True) <- _to_copy(bfloat16[6], 15, 0, None, None, True, None) <- to(bfloat16[6], 
15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.0036875364769043037, "trace": "copy_(bfloat16[6], bfloat16[6], True) <- _to_copy(bfloat16[6], 15, 0, None, None, True, None) <- to(bfloat16[6], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.432, "pct_cuda_time": 0.0036396463927886634, "trace": "copy_(int32[6], int32[6], True) <- _to_copy(int32[6], 3, 0, None, None, True, None) <- to(int32[6], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.003783316645135584, "trace": "copy_(bfloat16[6], bfloat16[6], True) <- _to_copy(bfloat16[6], 15, 0, None, None, True, None) <- to(bfloat16[6], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0011972521028910076, "trace": "copy_(bfloat16[6], bfloat16[6], True) <- _to_copy(bfloat16[6], 15, 0, None, None, True, None) <- to(bfloat16[6], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "copy_(bfloat16[6], bfloat16[6], True) <- _to_copy(bfloat16[6], 15, 0, None, None, True, None) <- to(bfloat16[6], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0011493620187753672, "trace": "copy_(bfloat16[6], bfloat16[6], True) <- _to_copy(bfloat16[6], 15, 0, None, None, True, None) <- to(bfloat16[6], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 4.383, "pct_cuda_time": 0.006559444958714108, "trace": "copy_(float32[6, 128256], bfloat16[6, 128256], False) <- _to_copy(bfloat16[6, 128256], 6, None, None, None, False, None) <- to(bfloat16[6, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 5.6, "pct_cuda_time": 0.008380764720237053, "trace": "div_(float32[6, 128256], bfloat16[6, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 35.679, "pct_cuda_time": 0.05339594722381033, "trace": "_softmax(float32[6, 
128256], -1, False) <- softmax(float32[6, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 28.383, "pct_cuda_time": 0.04247700804544434, "trace": "_log_softmax(float32[6, 128256], -1, False) <- log_softmax(float32[6, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 1.952, "pct_cuda_time": 0.0029212951310540586, "trace": "copy_(int64[6], int32[6], False) <- _to_copy(int32[6], 4, None, None, None, False, None) <- to(int32[6], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 6.176, "pct_cuda_time": 0.00924278623431858, "trace": "index(float32[6, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 27.84, "pct_cuda_time": 0.04166437318060707, "trace": "argmax(float32[6, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 2.592, "pct_cuda_time": 0.003879096813366865, "trace": "copy_(int64[6], int64[6], False) <- _to_copy(int64[6], 4, 0, None, None, False, None) <- to(int64[6], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] }, "decode_1": { "metadata": { "num_running_seqs": 6 }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", "cuda_time_us": 6536.813999999998, "pct_cuda_time": 93.33944095551921, "invocations": 1 }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 5.824, "pct_cuda_time": 0.08316113998730025, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 5.824, "pct_cuda_time": 0.08316113998730025, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 
6527.949999999998, "pct_cuda_time": 93.2128715281759, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 199.52100000000002, "pct_cuda_time": 2.8489687176178116, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 4.16, "pct_cuda_time": 0.05940081427664303, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 195.36100000000002, "pct_cuda_time": 2.7895679033411684, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 2038.3760000000004, "pct_cuda_time": 29.10605629854965, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 668.569, "pct_cuda_time": 9.546524759644461, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 668.569, "pct_cuda_time": 9.546524759644461, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 122.592, "pct_cuda_time": 1.7504963037986112, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 122.592, "pct_cuda_time": 1.7504963037986112, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 673.6260000000001, "pct_cuda_time": 9.618733874499506, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 85.95300000000002, "pct_cuda_time": 1.2273264878654566, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 545.752, "pct_cuda_time": 7.792815671419828, "invocations": 32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 41.92100000000001, 
"pct_cuda_time": 0.5985917152142194, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 573.589, "pct_cuda_time": 8.190301360607068, "invocations": 32 }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cuda_time_us": 505.109, "pct_cuda_time": 7.212472571745406, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cuda_time_us": 68.48000000000002, "pct_cuda_time": 0.9778287888616625, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 4290.052999999999, "pct_cuda_time": 61.25784651200846, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 2600.1889999999994, "pct_cuda_time": 37.128207661819744, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 2600.1889999999994, "pct_cuda_time": 37.128207661819744, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 289.242, "pct_cuda_time": 4.1300986353376885, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 289.242, "pct_cuda_time": 4.1300986353376885, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 1400.622, "pct_cuda_time": 19.999540214851038, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 1400.622, "pct_cuda_time": 19.999540214851038, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 349.78700000000003, "pct_cuda_time": 4.994623226775033, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 4.96, "pct_cuda_time": 0.07082404779138207, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 0.768, "pct_cuda_time": 0.010966304174149483, "invocations": 1 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 344.059, "pct_cuda_time": 4.912832874809501, 
"invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 116.67, "pct_cuda_time": 1.6659358177057555, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 5.343, "pct_cuda_time": 0.0762929208365634, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 4.353, "pct_cuda_time": 0.06215666936207383, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 5.536, "pct_cuda_time": 0.07904877592199418, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 34.784, "pct_cuda_time": 0.49668219322085366, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 28.255, "pct_cuda_time": 0.4034543286986896, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 1.824, "pct_cuda_time": 0.02604497241360502, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cuda_time_us": 6.24, "pct_cuda_time": 0.08910122141496456, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, 
unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 27.712, "pct_cuda_time": 0.3957008089505605, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 2.623, "pct_cuda_time": 0.03745392688645065, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 78666.36, "cuda_time_us": 6536.813999999998, "pct_cuda_time": 93.33944095551921, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 294.012, "cuda_time_us": 5.824, "pct_cuda_time": 0.08316113998730025, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 5.824, "pct_cuda_time": 0.08316113998730025, "trace": "index_select(bfloat16[128256, 4096], 0, int64[6]) <- embedding(bfloat16[128256, 4096], int64[6], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 4122.084, "cuda_time_us": 210.783, "pct_cuda_time": 3.00977928742155, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 273.58, "cuda_time_us": 4.16, "pct_cuda_time": 0.05940081427664303, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.16, "pct_cuda_time": 0.05940081427664303, "trace": "_C::rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2997.244, "cuda_time_us": 69.184, "pct_cuda_time": 0.9878812343546326, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 528.929, "cuda_time_us": 26.112, "pct_cuda_time": 0.3728543419210824, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 26.112, "pct_cuda_time": 0.3728543419210824, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 872.49, "cuda_time_us": 3.681, "pct_cuda_time": 0.052561153209693026, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.681, "pct_cuda_time": 0.052561153209693026, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1042.747, "cuda_time_us": 21.184, "pct_cuda_time": 0.3024872234702899, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.657, "pct_cuda_time": 
0.037939414310827056, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.151, "pct_cuda_time": 0.2448998475141117, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.376, "pct_cuda_time": 0.019647961645351156, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 302.255, "cuda_time_us": 18.207, "pct_cuda_time": 0.25997851575356723, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.095, "pct_cuda_time": 0.22982117927465612, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, 
void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 122.177, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 613.257, "cuda_time_us": 134.303, "pct_cuda_time": 1.9177181634124973, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 233.106, "cuda_time_us": 81.247, "pct_cuda_time": 1.160129316715004, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.247, "pct_cuda_time": 1.160129316715004, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 134.263, "cuda_time_us": 8.832, "pct_cuda_time": 0.12611249800271906, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.832, "pct_cuda_time": 0.12611249800271906, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 163.889, "cuda_time_us": 44.224, "pct_cuda_time": 0.6314763486947743, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.224, "pct_cuda_time": 0.6314763486947743, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2547.534, "cuda_time_us": 204.89, "pct_cuda_time": 2.9256328935436033, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.894, "cuda_time_us": 3.135, "pct_cuda_time": 0.04476479633588363, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.135, "pct_cuda_time": 0.04476479633588363, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1815.504, "cuda_time_us": 64.125, "pct_cuda_time": 0.9156435614158015, "trace": "" }, "children": [ { 
"entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.697, "cuda_time_us": 21.376, "pct_cuda_time": 0.3052287995138273, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.376, "pct_cuda_time": 0.3052287995138273, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 565.2, "cuda_time_us": 3.872, "pct_cuda_time": 0.05528845021133698, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.872, "pct_cuda_time": 0.05528845021133698, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 765.646, "cuda_time_us": 20.926000000000002, "pct_cuda_time": 0.2988032306617866, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.655, "pct_cuda_time": 0.0379108562270402, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.959, "pct_cuda_time": 0.2421582714705743, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void 
at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018734102964172033, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 176.804, "cuda_time_us": 17.951, "pct_cuda_time": 0.25632308102885076, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.839, "pct_cuda_time": 0.22616574454993968, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.118, "cuda_time_us": 3.168, "pct_cuda_time": 0.04523600471836662, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04523600471836662, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 483.032, "cuda_time_us": 134.462, "pct_cuda_time": 1.9199885310735516, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.655, "cuda_time_us": 80.831, "pct_cuda_time": 1.1541892352873397, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.831, "pct_cuda_time": 1.1541892352873397, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 106.616, "cuda_time_us": 9.023, "pct_cuda_time": 0.128839795004363, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", 
"cpu_time_us": 0, "cuda_time_us": 9.023, "pct_cuda_time": 0.128839795004363, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.823, "cuda_time_us": 44.608, "pct_cuda_time": 0.6369595007818492, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.608, "pct_cuda_time": 0.6369595007818492, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2393.025, "cuda_time_us": 205.50200000000004, "pct_cuda_time": 2.934371667182379, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.463, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1697.578, "cuda_time_us": 63.904, "pct_cuda_time": 0.9124878931573549, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.471, "cuda_time_us": 20.832, "pct_cuda_time": 0.2974610007238047, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.832, "pct_cuda_time": 0.2974610007238047, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 537.65, "cuda_time_us": 3.968, "pct_cuda_time": 0.056659238233105653, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.968, "pct_cuda_time": 0.056659238233105653, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 704.928, "cuda_time_us": 21.376, "pct_cuda_time": 0.3052287995138273, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.656, "pct_cuda_time": 0.03792513526893363, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 
128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.408, "pct_cuda_time": 0.24856956128072163, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018734102964172033, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 163.319, "cuda_time_us": 17.728, "pct_cuda_time": 0.25313885468661723, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.616, "pct_cuda_time": 0.22298151820770612, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, 
"children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.718, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 458.375, "cuda_time_us": 135.51800000000003, "pct_cuda_time": 1.9350671993130077, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.554, "cuda_time_us": 83.007, "pct_cuda_time": 1.18526043044743, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.007, "pct_cuda_time": 1.18526043044743, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.838, "cuda_time_us": 8.96, "pct_cuda_time": 0.1279402153650773, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.96, "pct_cuda_time": 0.1279402153650773, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 140.104, "cuda_time_us": 43.551, "pct_cuda_time": 0.6218665535005002, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.551, "pct_cuda_time": 0.6218665535005002, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2491.67, "cuda_time_us": 204.34900000000002, "pct_cuda_time": 2.9179079318792613, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.395, "cuda_time_us": 3.232, "pct_cuda_time": 0.04614986339954574, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.04614986339954574, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1746.593, "cuda_time_us": 62.784000000000006, "pct_cuda_time": 0.8964953662367204, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.185, "cuda_time_us": 20.512, "pct_cuda_time": 0.29289170731790914, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.512, "pct_cuda_time": 0.29289170731790914, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 521.72, "cuda_time_us": 3.872, "pct_cuda_time": 0.05528845021133698, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.872, "pct_cuda_time": 0.05528845021133698, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 738.27, "cuda_time_us": 20.737000000000002, "pct_cuda_time": 0.2961044917439295, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.657, "pct_cuda_time": 0.037939414310827056, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.832, "pct_cuda_time": 0.24034483315010952, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.01782024428299291, "trace": 
"fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 173.675, "cuda_time_us": 17.663, "pct_cuda_time": 0.2522107169635447, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.552, "pct_cuda_time": 0.22206765952652702, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.111, "pct_cuda_time": 0.03014305743701766, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.627, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 512.772, "cuda_time_us": 135.293, "pct_cuda_time": 1.9318544148869872, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 200.147, "cuda_time_us": 81.918, "pct_cuda_time": 1.1697105538254915, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.918, "pct_cuda_time": 1.1697105538254915, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.524, "cuda_time_us": 9.216, "pct_cuda_time": 0.1315956500897938, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.216, "pct_cuda_time": 0.1315956500897938, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 
14336])", "cpu_time_us": 150.543, "cuda_time_us": 44.159, "pct_cuda_time": 0.6305482109717018, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.159, "pct_cuda_time": 0.6305482109717018, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2332.148, "cuda_time_us": 203.58300000000003, "pct_cuda_time": 2.9069701857888988, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.103, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1643.43, "cuda_time_us": 63.486999999999995, "pct_cuda_time": 0.9065335326877971, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 142.474, "cuda_time_us": 20.767, "pct_cuda_time": 0.29653286300073217, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.767, "pct_cuda_time": 0.29653286300073217, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 484.227, "cuda_time_us": 3.84, "pct_cuda_time": 0.05483152087074741, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.05483152087074741, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 708.248, "cuda_time_us": 20.896, "pct_cuda_time": 0.29837485940498387, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.038382064609523196, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, 
float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.928, "pct_cuda_time": 0.2417156211718782, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 158.435, "cuda_time_us": 17.983999999999998, "pct_cuda_time": 0.2567942894113337, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.68, "pct_cuda_time": 0.22389537688888528, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.304, "pct_cuda_time": 0.03289891252244845, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.771, "cuda_time_us": 3.137, "pct_cuda_time": 0.04479335441967048, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.137, "pct_cuda_time": 0.04479335441967048, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 461.905, "cuda_time_us": 133.919, "pct_cuda_time": 1.912235011325423, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 171.029, "cuda_time_us": 81.343, "pct_cuda_time": 1.1615001047367728, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.343, "pct_cuda_time": 1.1615001047367728, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.902, "cuda_time_us": 8.961, "pct_cuda_time": 0.12795449440697074, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.961, "pct_cuda_time": 0.12795449440697074, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.632, "cuda_time_us": 43.615, "pct_cuda_time": 0.6227804121816793, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.615, "pct_cuda_time": 0.6227804121816793, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2385.639, "cuda_time_us": 202.527, "pct_cuda_time": 2.8918915175494426, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.332, "cuda_time_us": 3.137, "pct_cuda_time": 0.04479335441967048, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.137, "pct_cuda_time": 0.04479335441967048, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1705.468, "cuda_time_us": 63.199999999999996, "pct_cuda_time": 0.9024354476643844, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.241, "cuda_time_us": 20.32, "pct_cuda_time": 0.2901501312743718, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.32, "pct_cuda_time": 0.2901501312743718, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], 
bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 483.512, "cuda_time_us": 3.84, "pct_cuda_time": 0.05483152087074741, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.05483152087074741, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 764.451, "cuda_time_us": 21.152, "pct_cuda_time": 0.30203029412970034, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.038382064609523196, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.152, "pct_cuda_time": 0.2449141265560051, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018734102964172033, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, 
None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 165.547, "cuda_time_us": 17.887999999999998, "pct_cuda_time": 0.255423501389565, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.648, "pct_cuda_time": 0.2234384475482957, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.24, "pct_cuda_time": 0.031985053841269324, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.444, "cuda_time_us": 3.137, "pct_cuda_time": 0.04479335441967048, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.137, "pct_cuda_time": 0.04479335441967048, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 451.005, "cuda_time_us": 133.053, "pct_cuda_time": 1.8998693610457176, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.734, "cuda_time_us": 81.215, "pct_cuda_time": 1.1596723873744144, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.215, "pct_cuda_time": 1.1596723873744144, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.158, "cuda_time_us": 8.991, "pct_cuda_time": 0.12838286566377344, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.991, "pct_cuda_time": 0.12838286566377344, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 140.726, "cuda_time_us": 42.847, "pct_cuda_time": 0.6118141080075299, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 
42.847, "pct_cuda_time": 0.6118141080075299, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2332.84, "cuda_time_us": 205.022, "pct_cuda_time": 2.9275177270735355, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.936, "cuda_time_us": 3.072, "pct_cuda_time": 0.04386521669659793, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04386521669659793, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1635.344, "cuda_time_us": 63.648, "pct_cuda_time": 0.9088324584326384, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 134.995, "cuda_time_us": 20.448, "pct_cuda_time": 0.29197784863673, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.448, "pct_cuda_time": 0.29197784863673, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 470.935, "cuda_time_us": 3.872, "pct_cuda_time": 0.05528845021133698, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.872, "pct_cuda_time": 0.05528845021133698, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 707.047, "cuda_time_us": 20.928, "pct_cuda_time": 0.2988317887455734, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.038382064609523196, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, 
false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.928, "pct_cuda_time": 0.2417156211718782, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018734102964172033, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 167.613, "cuda_time_us": 18.4, "pct_cuda_time": 0.26273437083899803, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.288, "pct_cuda_time": 0.23257703436008695, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.316, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], 
bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 471.191, "cuda_time_us": 135.166, "pct_cuda_time": 1.930040976566522, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 171.716, "cuda_time_us": 82.751, "pct_cuda_time": 1.1816049957227135, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.751, "pct_cuda_time": 1.1816049957227135, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.03, "cuda_time_us": 9.055, "pct_cuda_time": 0.12929672434495257, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.055, "pct_cuda_time": 0.12929672434495257, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.616, "cuda_time_us": 43.36, "pct_cuda_time": 0.6191392564988563, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.36, "pct_cuda_time": 0.6191392564988563, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2370.605, "cuda_time_us": 203.83800000000002, "pct_cuda_time": 2.9106113414717223, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.026, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1692.188, "cuda_time_us": 63.52, "pct_cuda_time": 0.9070047410702801, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.531, "cuda_time_us": 20.736, "pct_cuda_time": 0.29609021270203606, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.736, "pct_cuda_time": 0.29609021270203606, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 537.32, "cuda_time_us": 4.0, "pct_cuda_time": 0.057116167573695226, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long 
const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.0, "pct_cuda_time": 0.057116167573695226, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 696.486, "cuda_time_us": 21.152, "pct_cuda_time": 0.30203029412970034, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.655, "pct_cuda_time": 0.0379108562270402, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.184, "pct_cuda_time": 0.2453710558965947, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.313, "pct_cuda_time": 0.018748382006065455, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 158.204, 
"cuda_time_us": 17.631999999999998, "pct_cuda_time": 0.2517680666648485, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.552, "pct_cuda_time": 0.22206765952652702, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.08, "pct_cuda_time": 0.029700407138321516, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.909, "cuda_time_us": 3.2, "pct_cuda_time": 0.045692934058956185, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.045692934058956185, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 457.088, "cuda_time_us": 134.078, "pct_cuda_time": 1.914505378986477, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.026, "cuda_time_us": 81.375, "pct_cuda_time": 1.1619570340773622, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.375, "pct_cuda_time": 1.1619570340773622, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.637, "cuda_time_us": 8.927, "pct_cuda_time": 0.1274690069825943, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.927, "pct_cuda_time": 0.1274690069825943, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 148.066, "cuda_time_us": 43.776, "pct_cuda_time": 0.6250793379265205, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.776, "pct_cuda_time": 0.6250793379265205, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2256.027, 
"cuda_time_us": 203.036, "pct_cuda_time": 2.899159549873196, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.09, "cuda_time_us": 3.072, "pct_cuda_time": 0.04386521669659793, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04386521669659793, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1563.32, "cuda_time_us": 63.358999999999995, "pct_cuda_time": 0.9047058153254388, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.176, "cuda_time_us": 20.672, "pct_cuda_time": 0.29517635402085696, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.672, "pct_cuda_time": 0.29517635402085696, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 469.469, "cuda_time_us": 3.904, "pct_cuda_time": 0.05574537955192653, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.904, "pct_cuda_time": 0.05574537955192653, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 667.645, "cuda_time_us": 20.864, "pct_cuda_time": 0.2979179300643943, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.656, "pct_cuda_time": 0.03792513526893363, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, 
cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.928, "pct_cuda_time": 0.2417156211718782, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 153.871, "cuda_time_us": 17.919, "pct_cuda_time": 0.25586615168826116, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.807, "pct_cuda_time": 0.2257088152093501, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.631, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 476.674, "cuda_time_us": 133.597, "pct_cuda_time": 1.9076371598357404, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.577, "cuda_time_us": 
80.318, "pct_cuda_time": 1.1468640867960131, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.318, "pct_cuda_time": 1.1468640867960131, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.914, "cuda_time_us": 8.992, "pct_cuda_time": 0.12839714470566688, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.12839714470566688, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.89, "cuda_time_us": 44.287, "pct_cuda_time": 0.6323759283340601, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.287, "pct_cuda_time": 0.6323759283340601, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2373.573, "cuda_time_us": 202.94299999999998, "pct_cuda_time": 2.897831598977107, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.134, "cuda_time_us": 3.072, "pct_cuda_time": 0.04386521669659793, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04386521669659793, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1704.184, "cuda_time_us": 63.072, "pct_cuda_time": 0.9006077303020263, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 151.524, "cuda_time_us": 20.416, "pct_cuda_time": 0.29152091929614043, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.416, "pct_cuda_time": 0.29152091929614043, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 517.009, "cuda_time_us": 3.84, "pct_cuda_time": 0.05483152087074741, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.05483152087074741, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { 
"entry": { "name": "Attention", "cpu_time_us": 724.897, "cuda_time_us": 20.96, "pct_cuda_time": 0.299288718086163, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.656, "pct_cuda_time": 0.03792513526893363, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.024, "pct_cuda_time": 0.24308640919364688, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 156.207, "cuda_time_us": 17.856, "pct_cuda_time": 0.2549665720489755, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.744, "pct_cuda_time": 0.2248092355700644, "trace": "mm(bfloat16[6, 4096], 
bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.634, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 446.975, "cuda_time_us": 133.791, "pct_cuda_time": 1.9104072939630643, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.403, "cuda_time_us": 81.407, "pct_cuda_time": 1.1624139634179516, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.407, "pct_cuda_time": 1.1624139634179516, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.053, "cuda_time_us": 9.024, "pct_cuda_time": 0.1288540740462564, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.024, "pct_cuda_time": 0.1288540740462564, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.774, "cuda_time_us": 43.36, "pct_cuda_time": 0.6191392564988563, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.36, "pct_cuda_time": 0.6191392564988563, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2281.984, "cuda_time_us": 202.81400000000002, "pct_cuda_time": 2.895989602572856, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.566, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, 
void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1633.85, "cuda_time_us": 63.486999999999995, "pct_cuda_time": 0.9065335326877971, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.409, "cuda_time_us": 20.608, "pct_cuda_time": 0.2942624953396778, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.608, "pct_cuda_time": 0.2942624953396778, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 476.294, "cuda_time_us": 3.84, "pct_cuda_time": 0.05483152087074741, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.05483152087074741, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 713.031, "cuda_time_us": 20.959, "pct_cuda_time": 0.29927443904426954, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.038382064609523196, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.991, "pct_cuda_time": 0.2426152008111639, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], 
bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 154.451, "cuda_time_us": 18.080000000000002, "pct_cuda_time": 0.2581650774331024, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.775, "pct_cuda_time": 0.22525188586876055, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.305, "pct_cuda_time": 0.03291319156434187, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.533, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 441.257, "cuda_time_us": 133.247, "pct_cuda_time": 1.9026394951730423, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 153.128, "cuda_time_us": 80.959, "pct_cuda_time": 1.156016952649698, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.959, "pct_cuda_time": 1.156016952649698, "trace": "mm(bfloat16[6, 4096], 
bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.228, "cuda_time_us": 8.928, "pct_cuda_time": 0.12748328602448775, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.12748328602448775, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.217, "cuda_time_us": 43.36, "pct_cuda_time": 0.6191392564988563, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.36, "pct_cuda_time": 0.6191392564988563, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2404.298, "cuda_time_us": 203.005, "pct_cuda_time": 2.8987168995744996, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.195, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1697.956, "cuda_time_us": 62.944, "pct_cuda_time": 0.8987800129396681, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 132.735, "cuda_time_us": 20.448, "pct_cuda_time": 0.29197784863673, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.448, "pct_cuda_time": 0.29197784863673, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 500.094, "cuda_time_us": 3.777, "pct_cuda_time": 0.053931941231461715, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.777, "pct_cuda_time": 0.053931941231461715, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 709.356, "cuda_time_us": 20.8, "pct_cuda_time": 0.29700407138321516, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, 
__nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.656, "pct_cuda_time": 0.03792513526893363, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.832, "pct_cuda_time": 0.24034483315010952, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018734102964172033, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 167.087, "cuda_time_us": 17.919, "pct_cuda_time": 0.25586615168826116, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.807, "pct_cuda_time": 0.2257088152093501, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, 
__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.139, "cuda_time_us": 3.104, "pct_cuda_time": 0.044322146037187496, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.044322146037187496, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 480.975, "cuda_time_us": 133.821, "pct_cuda_time": 1.910835665219867, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.334, "cuda_time_us": 80.863, "pct_cuda_time": 1.1546461646279291, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.863, "pct_cuda_time": 1.1546461646279291, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.554, "cuda_time_us": 9.055, "pct_cuda_time": 0.12929672434495257, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.055, "pct_cuda_time": 0.12929672434495257, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 147.439, "cuda_time_us": 43.903, "pct_cuda_time": 0.6268927762469854, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.903, "pct_cuda_time": 0.6268927762469854, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2382.333, "cuda_time_us": 205.245, "pct_cuda_time": 2.930701953415769, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.022, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { 
"name": "LlamaAttention", "cpu_time_us": 1695.168, "cuda_time_us": 64.287, "pct_cuda_time": 0.9179567662025363, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.911, "cuda_time_us": 21.119, "pct_cuda_time": 0.30155908574721735, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.119, "pct_cuda_time": 0.30155908574721735, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 490.919, "cuda_time_us": 3.776, "pct_cuda_time": 0.05391766218956829, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.05391766218956829, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 750.773, "cuda_time_us": 20.992, "pct_cuda_time": 0.2997456474267525, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.72, "pct_cuda_time": 0.038838993950112755, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.992, "pct_cuda_time": 0.2426294798530573, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], 
bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 165.246, "cuda_time_us": 18.4, "pct_cuda_time": 0.26273437083899803, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.288, "pct_cuda_time": 0.23257703436008695, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.737, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 464.405, "cuda_time_us": 134.878, "pct_cuda_time": 1.9259286125012158, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 171.682, "cuda_time_us": 81.118, "pct_cuda_time": 1.1582873203107522, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.118, "pct_cuda_time": 1.1582873203107522, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.15, "cuda_time_us": 9.344, "pct_cuda_time": 0.13342336745215203, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.344, "pct_cuda_time": 0.13342336745215203, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.694, "cuda_time_us": 44.416, "pct_cuda_time": 0.6342179247383117, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.416, "pct_cuda_time": 0.6342179247383117, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2260.983, "cuda_time_us": 203.09699999999998, "pct_cuda_time": 2.9000305714286942, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.358, "cuda_time_us": 3.104, "pct_cuda_time": 0.044322146037187496, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.044322146037187496, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1568.665, "cuda_time_us": 63.164, "pct_cuda_time": 0.9019214021562213, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 134.874, "cuda_time_us": 20.479, "pct_cuda_time": 0.2924204989354261, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.479, "pct_cuda_time": 0.2924204989354261, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 465.252, "cuda_time_us": 3.84, "pct_cuda_time": 0.05483152087074741, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.05483152087074741, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 672.553, "cuda_time_us": 21.151, "pct_cuda_time": 0.3020160150878069, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.752, "pct_cuda_time": 0.03929592329070231, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], 
None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.991, "pct_cuda_time": 0.2426152008111639, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.408, "pct_cuda_time": 0.020104890985940718, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 155.101, "cuda_time_us": 17.694, "pct_cuda_time": 0.2526533672622408, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.583, "pct_cuda_time": 0.22251030982522318, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.111, "pct_cuda_time": 0.03014305743701766, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- 
matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.409, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 471.803, "cuda_time_us": 133.789, "pct_cuda_time": 1.9103787358792774, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.38, "cuda_time_us": 80.606, "pct_cuda_time": 1.1509764508613192, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.606, "pct_cuda_time": 1.1509764508613192, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 116.717, "cuda_time_us": 9.152, "pct_cuda_time": 0.13068179140861466, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.152, "pct_cuda_time": 0.13068179140861466, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.913, "cuda_time_us": 44.031, "pct_cuda_time": 0.6287204936093436, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.031, "pct_cuda_time": 0.6287204936093436, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2380.757, "cuda_time_us": 203.26100000000002, "pct_cuda_time": 2.9023723342992165, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.1, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1703.97, "cuda_time_us": 63.359, "pct_cuda_time": 0.9047058153254388, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 148.054, "cuda_time_us": 20.544, "pct_cuda_time": 0.29334863665849864, "trace": 
"" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.544, "pct_cuda_time": 0.29334863665849864, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 532.244, "cuda_time_us": 3.744, "pct_cuda_time": 0.05346073284897873, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.05346073284897873, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 714.837, "cuda_time_us": 21.12, "pct_cuda_time": 0.3015733647891108, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.656, "pct_cuda_time": 0.03792513526893363, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.184, "pct_cuda_time": 0.2453710558965947, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 
0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 155.068, "cuda_time_us": 17.951, "pct_cuda_time": 0.25632308102885076, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.839, "pct_cuda_time": 0.22616574454993968, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.866, "cuda_time_us": 3.168, "pct_cuda_time": 0.04523600471836662, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04523600471836662, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 455.597, "cuda_time_us": 133.598, "pct_cuda_time": 1.9076514388776338, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.086, "cuda_time_us": 81.215, "pct_cuda_time": 1.1596723873744144, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.215, "pct_cuda_time": 1.1596723873744144, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.151, "cuda_time_us": 9.216, "pct_cuda_time": 0.1315956500897938, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.216, "pct_cuda_time": 0.1315956500897938, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": 
"RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.146, "cuda_time_us": 43.167, "pct_cuda_time": 0.6163834014134254, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.167, "pct_cuda_time": 0.6163834014134254, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2296.416, "cuda_time_us": 203.935, "pct_cuda_time": 2.911996408535384, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.402, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1626.502, "cuda_time_us": 63.264, "pct_cuda_time": 0.9033493063455637, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.485, "cuda_time_us": 20.736, "pct_cuda_time": 0.29609021270203606, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.736, "pct_cuda_time": 0.29609021270203606, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 487.174, "cuda_time_us": 3.808, "pct_cuda_time": 0.05437459153015785, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05437459153015785, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 710.773, "cuda_time_us": 20.897000000000002, "pct_cuda_time": 0.2983891384468773, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.657, "pct_cuda_time": 0.037939414310827056, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, 
cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.96, "pct_cuda_time": 0.24217255051246778, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 153.042, "cuda_time_us": 17.823, "pct_cuda_time": 0.2544953636664925, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.712, "pct_cuda_time": 0.22435230622947483, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.111, "pct_cuda_time": 0.03014305743701766, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.683, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { 
"entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 453.715, "cuda_time_us": 134.495, "pct_cuda_time": 1.9204597394560348, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.052, "cuda_time_us": 81.951, "pct_cuda_time": 1.1701817622079742, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.951, "pct_cuda_time": 1.1701817622079742, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.259, "cuda_time_us": 9.152, "pct_cuda_time": 0.13068179140861466, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.152, "pct_cuda_time": 0.13068179140861466, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.58, "cuda_time_us": 43.392, "pct_cuda_time": 0.6195961858394459, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.392, "pct_cuda_time": 0.6195961858394459, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2335.812, "cuda_time_us": 204.668, "pct_cuda_time": 2.9224629462432636, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.773, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1640.934, "cuda_time_us": 64.83, "pct_cuda_time": 0.9257102859506653, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 132.438, "cuda_time_us": 20.8, "pct_cuda_time": 0.29700407138321516, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.8, "pct_cuda_time": 0.29700407138321516, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 
4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 484.665, "cuda_time_us": 3.775, "pct_cuda_time": 0.053903383147674865, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.775, "pct_cuda_time": 0.053903383147674865, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 705.525, "cuda_time_us": 21.695, "pct_cuda_time": 0.3097838138778295, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.848, "pct_cuda_time": 0.040666711312470995, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.343, "pct_cuda_time": 0.24764142355764907, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.021475679007709404, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, 
None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 162.276, "cuda_time_us": 18.56, "pct_cuda_time": 0.26501901754194584, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.447, "pct_cuda_time": 0.23484740202114135, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.113, "pct_cuda_time": 0.030171615520804503, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.065, "cuda_time_us": 3.104, "pct_cuda_time": 0.044322146037187496, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.044322146037187496, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 465.105, "cuda_time_us": 133.598, "pct_cuda_time": 1.9076514388776338, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 166.693, "cuda_time_us": 81.599, "pct_cuda_time": 1.1651555394614892, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.599, "pct_cuda_time": 1.1651555394614892, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.944, "cuda_time_us": 8.928, "pct_cuda_time": 0.12748328602448775, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.12748328602448775, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.185, "cuda_time_us": 43.071, "pct_cuda_time": 0.6150126133916567, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 
43.071, "pct_cuda_time": 0.6150126133916567, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2316.855, "cuda_time_us": 202.71800000000002, "pct_cuda_time": 2.8946188145510874, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.18, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1647.674, "cuda_time_us": 62.815, "pct_cuda_time": 0.8969380165354164, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.016, "cuda_time_us": 20.32, "pct_cuda_time": 0.2901501312743718, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.32, "pct_cuda_time": 0.2901501312743718, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 520.86, "cuda_time_us": 3.775, "pct_cuda_time": 0.053903383147674865, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.775, "pct_cuda_time": 0.053903383147674865, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 684.252, "cuda_time_us": 21.024, "pct_cuda_time": 0.30020257676734213, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.624, "pct_cuda_time": 0.037468205928344066, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 
256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.12, "pct_cuda_time": 0.2444571972154156, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 157.256, "cuda_time_us": 17.695999999999998, "pct_cuda_time": 0.2526819253460276, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.552, "pct_cuda_time": 0.22206765952652702, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.03061426581950064, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.768, "cuda_time_us": 3.168, "pct_cuda_time": 0.04523600471836662, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04523600471836662, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 
4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 448.633, "cuda_time_us": 133.727, "pct_cuda_time": 1.9094934352818855, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.086, "cuda_time_us": 80.607, "pct_cuda_time": 1.1509907299032127, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.607, "pct_cuda_time": 1.1509907299032127, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.377, "cuda_time_us": 8.929, "pct_cuda_time": 0.12749756506638116, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.929, "pct_cuda_time": 0.12749756506638116, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.682, "cuda_time_us": 44.191, "pct_cuda_time": 0.6310051403122914, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.191, "pct_cuda_time": 0.6310051403122914, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2363.964, "cuda_time_us": 204.06, "pct_cuda_time": 2.913781288772062, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.219, "cuda_time_us": 3.104, "pct_cuda_time": 0.044322146037187496, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.044322146037187496, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1648.609, "cuda_time_us": 63.36, "pct_cuda_time": 0.9047200943673324, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.947, "cuda_time_us": 20.768, "pct_cuda_time": 0.2965471420426256, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.768, "pct_cuda_time": 0.2965471420426256, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 494.124, "cuda_time_us": 3.776, "pct_cuda_time": 0.05391766218956829, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long 
const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.05391766218956829, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 699.22, "cuda_time_us": 20.896, "pct_cuda_time": 0.29837485940498387, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.038382064609523196, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.928, "pct_cuda_time": 0.2417156211718782, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 165.099, 
"cuda_time_us": 17.92, "pct_cuda_time": 0.2558804307301546, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.808, "pct_cuda_time": 0.2257230942512435, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.792, "cuda_time_us": 3.039, "pct_cuda_time": 0.04339400831411495, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.039, "pct_cuda_time": 0.04339400831411495, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 489.615, "cuda_time_us": 134.55700000000002, "pct_cuda_time": 1.9213450400534273, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.299, "cuda_time_us": 81.119, "pct_cuda_time": 1.1583015993526458, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.119, "pct_cuda_time": 1.1583015993526458, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.081, "cuda_time_us": 9.055, "pct_cuda_time": 0.12929672434495257, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.055, "pct_cuda_time": 0.12929672434495257, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 168.463, "cuda_time_us": 44.383, "pct_cuda_time": 0.6337467163558288, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.383, "pct_cuda_time": 0.6337467163558288, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2322.129, 
"cuda_time_us": 203.26099999999997, "pct_cuda_time": 2.9023723342992156, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.422, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1609.134, "cuda_time_us": 63.903, "pct_cuda_time": 0.9124736141154614, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.257, "cuda_time_us": 21.343, "pct_cuda_time": 0.3047575911313443, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.343, "pct_cuda_time": 0.3047575911313443, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 477.451, "cuda_time_us": 3.745, "pct_cuda_time": 0.05347501189087216, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.745, "pct_cuda_time": 0.05347501189087216, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 690.327, "cuda_time_us": 21.055, "pct_cuda_time": 0.30064522706603825, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.624, "pct_cuda_time": 0.037468205928344066, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, 
cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.151, "pct_cuda_time": 0.2448998475141117, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 154.665, "cuda_time_us": 17.759999999999998, "pct_cuda_time": 0.2535957840272068, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.648, "pct_cuda_time": 0.2234384475482957, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.594, "cuda_time_us": 3.137, "pct_cuda_time": 0.04479335441967048, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.137, "pct_cuda_time": 0.04479335441967048, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 491.75, "cuda_time_us": 133.18099999999998, "pct_cuda_time": 1.9016970784080756, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 
156.735, "cuda_time_us": 80.895, "pct_cuda_time": 1.1551030939685187, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.895, "pct_cuda_time": 1.1551030939685187, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.499, "cuda_time_us": 9.023, "pct_cuda_time": 0.128839795004363, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.023, "pct_cuda_time": 0.128839795004363, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 187.186, "cuda_time_us": 43.263, "pct_cuda_time": 0.617754189435194, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.263, "pct_cuda_time": 0.617754189435194, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2336.815, "cuda_time_us": 204.637, "pct_cuda_time": 2.9220202959445674, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.913, "cuda_time_us": 3.264, "pct_cuda_time": 0.0466067927401353, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.264, "pct_cuda_time": 0.0466067927401353, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1648.323, "cuda_time_us": 64.38300000000001, "pct_cuda_time": 0.919327554224305, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.341, "cuda_time_us": 20.671, "pct_cuda_time": 0.29516207497896346, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.671, "pct_cuda_time": 0.29516207497896346, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 472.574, "cuda_time_us": 3.776, "pct_cuda_time": 0.05391766218956829, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.05391766218956829, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, 
"children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 674.963, "cuda_time_us": 21.376, "pct_cuda_time": 0.3052287995138273, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.038382064609523196, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.28, "pct_cuda_time": 0.24674184391836337, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.408, "pct_cuda_time": 0.020104890985940718, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 168.187, "cuda_time_us": 18.560000000000002, "pct_cuda_time": 0.26501901754194584, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.448, "pct_cuda_time": 
0.23486168106303476, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.673, "cuda_time_us": 3.231, "pct_cuda_time": 0.04613558435765231, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.231, "pct_cuda_time": 0.04613558435765231, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 463.945, "cuda_time_us": 133.759, "pct_cuda_time": 1.9099503646224747, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.421, "cuda_time_us": 80.511, "pct_cuda_time": 1.149619941881444, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.511, "pct_cuda_time": 1.149619941881444, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.713, "cuda_time_us": 9.088, "pct_cuda_time": 0.12976793272743553, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.088, "pct_cuda_time": 0.12976793272743553, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.871, "cuda_time_us": 44.16, "pct_cuda_time": 0.6305624900135952, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.16, "pct_cuda_time": 0.6305624900135952, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2374.549, "cuda_time_us": 203.22700000000003, "pct_cuda_time": 2.90188684687484, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.218, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1700.364, "cuda_time_us": 63.004999999999995, "pct_cuda_time": 0.8996510344951669, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.092, "cuda_time_us": 20.608, "pct_cuda_time": 0.2942624953396778, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.608, "pct_cuda_time": 0.2942624953396778, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 513.834, "cuda_time_us": 3.775, "pct_cuda_time": 0.053903383147674865, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.775, "pct_cuda_time": 0.053903383147674865, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 723.008, "cuda_time_us": 20.959, "pct_cuda_time": 0.29927443904426954, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.656, "pct_cuda_time": 0.03792513526893363, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.992, "pct_cuda_time": 
0.2426294798530573, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.311, "pct_cuda_time": 0.018719823922278608, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 157.207, "cuda_time_us": 17.663, "pct_cuda_time": 0.2522107169635447, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.551, "pct_cuda_time": 0.2220533804846336, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.571, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 454.758, "cuda_time_us": 134.04600000000002, "pct_cuda_time": 1.9140484496458876, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.563, "cuda_time_us": 81.471, "pct_cuda_time": 1.163327822099131, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, 
"cuda_time_us": 81.471, "pct_cuda_time": 1.163327822099131, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.689, "cuda_time_us": 9.055, "pct_cuda_time": 0.12929672434495257, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.055, "pct_cuda_time": 0.12929672434495257, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.978, "cuda_time_us": 43.52, "pct_cuda_time": 0.6214239032018041, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.52, "pct_cuda_time": 0.6214239032018041, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2349.205, "cuda_time_us": 204.031, "pct_cuda_time": 2.9133671965571524, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.786, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1656.75, "cuda_time_us": 63.488, "pct_cuda_time": 0.9065478117296905, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 145.054, "cuda_time_us": 20.448, "pct_cuda_time": 0.29197784863673, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.448, "pct_cuda_time": 0.29197784863673, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 505.097, "cuda_time_us": 3.904, "pct_cuda_time": 0.05574537955192653, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.904, "pct_cuda_time": 0.05574537955192653, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 696.362, "cuda_time_us": 21.12, "pct_cuda_time": 0.3015733647891108, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, 
(vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.784, "pct_cuda_time": 0.03975285263129187, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.056, "pct_cuda_time": 0.24354333853423646, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 164.779, "cuda_time_us": 18.016, "pct_cuda_time": 0.25725121875192325, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.84, "pct_cuda_time": 0.2261800235918331, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, 
__nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.176, "pct_cuda_time": 0.031071195160090204, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.562, "cuda_time_us": 3.296, "pct_cuda_time": 0.04706372208072486, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.04706372208072486, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 469.612, "cuda_time_us": 134.239, "pct_cuda_time": 1.9168043047313184, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 171.955, "cuda_time_us": 80.479, "pct_cuda_time": 1.1491630125408543, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.479, "pct_cuda_time": 1.1491630125408543, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.06, "cuda_time_us": 9.184, "pct_cuda_time": 0.1311387207492042, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.184, "pct_cuda_time": 0.1311387207492042, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.202, "cuda_time_us": 44.576, "pct_cuda_time": 0.6365025714412595, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.576, "pct_cuda_time": 0.6365025714412595, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2374.726, "cuda_time_us": 204.12600000000003, "pct_cuda_time": 2.9147237055370283, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.171, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": 
"_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1682.038, "cuda_time_us": 63.456, "pct_cuda_time": 0.9060908823891011, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.166, "cuda_time_us": 20.576, "pct_cuda_time": 0.29380556599908825, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.576, "pct_cuda_time": 0.29380556599908825, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 534.151, "cuda_time_us": 3.808, "pct_cuda_time": 0.05437459153015785, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05437459153015785, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 702.719, "cuda_time_us": 21.216, "pct_cuda_time": 0.3029441528108795, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.784, "pct_cuda_time": 0.03975285263129187, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.152, "pct_cuda_time": 0.2449141265560051, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, 
True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 157.491, "cuda_time_us": 17.856, "pct_cuda_time": 0.2549665720489755, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.583, "pct_cuda_time": 0.22251030982522318, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.273, "pct_cuda_time": 0.032456262223752315, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.25, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 470.153, "cuda_time_us": 134.622, "pct_cuda_time": 1.9222731777764999, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 154.376, "cuda_time_us": 81.631, "pct_cuda_time": 1.1656124688020786, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.631, "pct_cuda_time": 1.1656124688020786, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.55, 
"cuda_time_us": 9.024, "pct_cuda_time": 0.1288540740462564, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.024, "pct_cuda_time": 0.1288540740462564, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 165.546, "cuda_time_us": 43.967, "pct_cuda_time": 0.6278066349281644, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.967, "pct_cuda_time": 0.6278066349281644, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2239.563, "cuda_time_us": 203.485, "pct_cuda_time": 2.9055708396833433, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.81, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1601.86, "cuda_time_us": 63.902, "pct_cuda_time": 0.912459335073568, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.247, "cuda_time_us": 21.087, "pct_cuda_time": 0.3011021564066278, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.087, "pct_cuda_time": 0.3011021564066278, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 494.012, "cuda_time_us": 4.0, "pct_cuda_time": 0.057116167573695226, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.0, "pct_cuda_time": 0.057116167573695226, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 683.413, "cuda_time_us": 21.087, "pct_cuda_time": 0.3011021564066278, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.038382064609523196, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 
8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.119, "pct_cuda_time": 0.24444291817352212, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 149.172, "cuda_time_us": 17.728, "pct_cuda_time": 0.25313885468661723, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.616, "pct_cuda_time": 0.22298151820770612, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, 
"pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.587, "cuda_time_us": 3.041, "pct_cuda_time": 0.04342256639790179, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.041, "pct_cuda_time": 0.04342256639790179, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 432.84, "cuda_time_us": 133.406, "pct_cuda_time": 1.9049098628340964, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 150.392, "cuda_time_us": 80.415, "pct_cuda_time": 1.1482491538596755, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.415, "pct_cuda_time": 1.1482491538596755, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.476, "cuda_time_us": 8.96, "pct_cuda_time": 0.1279402153650773, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.96, "pct_cuda_time": 0.1279402153650773, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.36, "cuda_time_us": 44.031, "pct_cuda_time": 0.6287204936093436, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.031, "pct_cuda_time": 0.6287204936093436, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2350.278, "cuda_time_us": 204.736, "pct_cuda_time": 2.923433921092016, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.049, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1673.626, "cuda_time_us": 64.38499999999999, "pct_cuda_time": 0.9193561123080917, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", 
"cpu_time_us": 148.541, "cuda_time_us": 21.408, "pct_cuda_time": 0.30568572885441686, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.408, "pct_cuda_time": 0.30568572885441686, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 503.974, "cuda_time_us": 3.808, "pct_cuda_time": 0.05437459153015785, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05437459153015785, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 714.475, "cuda_time_us": 21.281, "pct_cuda_time": 0.30387229053395204, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.848, "pct_cuda_time": 0.040666711312470995, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.12, "pct_cuda_time": 0.2444571972154156, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, 
at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.313, "pct_cuda_time": 0.018748382006065455, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 159.748, "cuda_time_us": 17.887999999999998, "pct_cuda_time": 0.255423501389565, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.776, "pct_cuda_time": 0.225266164910654, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.004, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 457.303, "cuda_time_us": 134.207, "pct_cuda_time": 1.9163473753907287, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 155.61, "cuda_time_us": 81.695, "pct_cuda_time": 1.1665263274832578, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.695, "pct_cuda_time": 1.1665263274832578, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.148, "cuda_time_us": 8.992, "pct_cuda_time": 0.12839714470566688, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.12839714470566688, "trace": 
"_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 151.468, "cuda_time_us": 43.52, "pct_cuda_time": 0.6214239032018041, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.52, "pct_cuda_time": 0.6214239032018041, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2258.931, "cuda_time_us": 203.48600000000002, "pct_cuda_time": 2.9055851187252366, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.436, "cuda_time_us": 2.975, "pct_cuda_time": 0.042480149632935824, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 2.975, "pct_cuda_time": 0.042480149632935824, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1603.191, "cuda_time_us": 63.168, "pct_cuda_time": 0.9019785183237949, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.172, "cuda_time_us": 20.48, "pct_cuda_time": 0.29243477797731954, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.48, "pct_cuda_time": 0.29243477797731954, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 477.835, "cuda_time_us": 3.776, "pct_cuda_time": 0.05391766218956829, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.05391766218956829, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 684.321, "cuda_time_us": 20.96, "pct_cuda_time": 0.299288718086163, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.038382064609523196, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": 
{ "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.928, "pct_cuda_time": 0.2417156211718782, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.019191032304761598, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 157.127, "cuda_time_us": 17.951999999999998, "pct_cuda_time": 0.25633736007074415, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.648, "pct_cuda_time": 0.2234384475482957, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.304, "pct_cuda_time": 0.03289891252244845, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", 
"cpu_time_us": 76.986, "cuda_time_us": 3.072, "pct_cuda_time": 0.04386521669659793, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04386521669659793, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 444.054, "cuda_time_us": 134.27100000000002, "pct_cuda_time": 1.917261234071908, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 153.754, "cuda_time_us": 82.208, "pct_cuda_time": 1.1738514759745844, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.208, "pct_cuda_time": 1.1738514759745844, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.193, "cuda_time_us": 8.992, "pct_cuda_time": 0.12839714470566688, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.12839714470566688, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.353, "cuda_time_us": 43.071, "pct_cuda_time": 0.6150126133916567, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.071, "pct_cuda_time": 0.6150126133916567, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2369.152, "cuda_time_us": 203.00700000000003, "pct_cuda_time": 2.898745457658287, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 65.435, "cuda_time_us": 3.2, "pct_cuda_time": 0.045692934058956185, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.045692934058956185, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1678.838, "cuda_time_us": 63.072, "pct_cuda_time": 0.9006077303020263, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.902, "cuda_time_us": 20.448, "pct_cuda_time": 0.29197784863673, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, 
"cuda_time_us": 20.448, "pct_cuda_time": 0.29197784863673, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 464.243, "cuda_time_us": 4.032, "pct_cuda_time": 0.05757309691428479, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.032, "pct_cuda_time": 0.05757309691428479, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 763.195, "cuda_time_us": 20.928, "pct_cuda_time": 0.2988317887455734, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.656, "pct_cuda_time": 0.03792513526893363, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.992, "pct_cuda_time": 0.2426294798530573, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 
8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 166.739, "cuda_time_us": 17.664, "pct_cuda_time": 0.25222499600543813, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.552, "pct_cuda_time": 0.22206765952652702, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.228, "cuda_time_us": 3.2, "pct_cuda_time": 0.045692934058956185, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.045692934058956185, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 472.086, "cuda_time_us": 133.53500000000003, "pct_cuda_time": 1.9067518592383481, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 175.715, "cuda_time_us": 81.343, "pct_cuda_time": 1.1615001047367728, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.343, "pct_cuda_time": 1.1615001047367728, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.081, "cuda_time_us": 8.992, "pct_cuda_time": 0.12839714470566688, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.12839714470566688, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.009, "cuda_time_us": 43.2, "pct_cuda_time": 0.6168546097959084, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.2, "pct_cuda_time": 0.6168546097959084, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2296.699, "cuda_time_us": 203.709, "pct_cuda_time": 2.90876934506747, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.762, "cuda_time_us": 3.072, "pct_cuda_time": 0.04386521669659793, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04386521669659793, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1644.743, "cuda_time_us": 63.775, "pct_cuda_time": 0.9106458967531033, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.538, "cuda_time_us": 21.056, "pct_cuda_time": 0.3006595061079317, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.056, "pct_cuda_time": 0.3006595061079317, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 524.321, "cuda_time_us": 3.808, "pct_cuda_time": 0.05437459153015785, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05437459153015785, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 675.254, "cuda_time_us": 20.831, "pct_cuda_time": 0.2974467216819113, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.038382064609523196, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, 
cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.863, "pct_cuda_time": 0.24078748344880563, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01827717362358247, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 154.819, "cuda_time_us": 18.08, "pct_cuda_time": 0.25816507743310235, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.968, "pct_cuda_time": 0.22800774095419132, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.366, "cuda_time_us": 3.072, "pct_cuda_time": 0.04386521669659793, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 
0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04386521669659793, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 435.45, "cuda_time_us": 133.79, "pct_cuda_time": 1.9103930149211708, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 153.222, "cuda_time_us": 80.479, "pct_cuda_time": 1.1491630125408543, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.479, "pct_cuda_time": 1.1491630125408543, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.171, "cuda_time_us": 9.184, "pct_cuda_time": 0.1311387207492042, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.184, "pct_cuda_time": 0.1311387207492042, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 134.952, "cuda_time_us": 44.127, "pct_cuda_time": 0.6300912816311124, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.127, "pct_cuda_time": 0.6300912816311124, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2286.463, "cuda_time_us": 204.25300000000001, "pct_cuda_time": 2.916537143857493, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.618, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1623.733, "cuda_time_us": 63.615, "pct_cuda_time": 0.9083612500501554, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 156.591, "cuda_time_us": 20.8, "pct_cuda_time": 0.29700407138321516, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.8, "pct_cuda_time": 0.29700407138321516, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 496.478, "cuda_time_us": 
3.84, "pct_cuda_time": 0.05483152087074741, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.05483152087074741, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 670.699, "cuda_time_us": 21.216, "pct_cuda_time": 0.3029441528108795, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.624, "pct_cuda_time": 0.037468205928344066, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.28, "pct_cuda_time": 0.24674184391836337, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018734102964172033, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], 
None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 161.413, "cuda_time_us": 17.759, "pct_cuda_time": 0.25358150498531334, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.647, "pct_cuda_time": 0.2234241685064023, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.154, "cuda_time_us": 3.2, "pct_cuda_time": 0.045692934058956185, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.045692934058956185, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 454.649, "cuda_time_us": 134.43, "pct_cuda_time": 1.9195316017329624, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.05, "cuda_time_us": 81.279, "pct_cuda_time": 1.1605862460555936, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.279, "pct_cuda_time": 1.1605862460555936, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.921, "cuda_time_us": 8.992, "pct_cuda_time": 0.12839714470566688, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.12839714470566688, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.309, "cuda_time_us": 44.159, "pct_cuda_time": 0.6305482109717018, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.159, "pct_cuda_time": 0.6305482109717018, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], 
None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2306.195, "cuda_time_us": 203.709, "pct_cuda_time": 2.90876934506747, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.267, "cuda_time_us": 3.232, "pct_cuda_time": 0.04614986339954574, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.04614986339954574, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1622.246, "cuda_time_us": 63.615, "pct_cuda_time": 0.9083612500501554, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 134.711, "cuda_time_us": 21.023, "pct_cuda_time": 0.30018829772544864, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.023, "pct_cuda_time": 0.30018829772544864, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 498.575, "cuda_time_us": 3.744, "pct_cuda_time": 0.05346073284897873, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.05346073284897873, "trace": "_C::rotary_embedding(int64[6], bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 698.011, "cuda_time_us": 21.056, "pct_cuda_time": 0.3006595061079317, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.656, "pct_cuda_time": 0.03792513526893363, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, 
false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 17.088, "pct_cuda_time": 0.244000267874826, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018734102964172033, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 152.512, "cuda_time_us": 17.792, "pct_cuda_time": 0.25405271336779633, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.68, "pct_cuda_time": 0.22389537688888528, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030157336478911077, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 99.877, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.044779075377777054, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 447.344, "cuda_time_us": 133.726, "pct_cuda_time": 1.9094791562399918, "trace": "" }, "children": [ { "entry": { "name": 
"MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.097, "cuda_time_us": 80.895, "pct_cuda_time": 1.1551030939685187, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.895, "pct_cuda_time": 1.1551030939685187, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.481, "cuda_time_us": 9.088, "pct_cuda_time": 0.12976793272743553, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.088, "pct_cuda_time": 0.12976793272743553, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.982, "cuda_time_us": 43.743, "pct_cuda_time": 0.6246081295440375, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.743, "pct_cuda_time": 0.6246081295440375, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2259.884, "cuda_time_us": 203.007, "pct_cuda_time": 2.8987454576582867, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.229, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04295135801541881, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1611.057, "cuda_time_us": 62.816, "pct_cuda_time": 0.8969522955773098, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.135, "cuda_time_us": 20.608, "pct_cuda_time": 0.2942624953396778, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.608, "pct_cuda_time": 0.2942624953396778, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[6, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 488.662, "cuda_time_us": 3.776, "pct_cuda_time": 0.05391766218956829, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.05391766218956829, "trace": "_C::rotary_embedding(int64[6], 
bfloat16[6, 4096], bfloat16[6, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 685.672, "cuda_time_us": 20.832, "pct_cuda_time": 0.2974610007238047, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.624, "pct_cuda_time": 0.037468205928344066, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[6], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 16.896, "pct_cuda_time": 0.24125869183128862, "trace": "_vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018734102964172033, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[6, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[6, 1, 32, 128], None, None, None, None, int32[6], None, None, int32[6, 33], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[6, 32, 128], bfloat16[6, 8, 128], bfloat16[6, 8, 128], bfloat16[6, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 149.836, "cuda_time_us": 17.6, "pct_cuda_time": 0.251311137324259, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", 
"cpu_time_us": 0, "cuda_time_us": 15.52, "pct_cuda_time": 0.22161073018593744, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.08, "pct_cuda_time": 0.029700407138321516, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[6, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.54, "cuda_time_us": 3.232, "pct_cuda_time": 0.04614986339954574, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.04614986339954574, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 437.491, "cuda_time_us": 133.951, "pct_cuda_time": 1.9126919406660121, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 152.323, "cuda_time_us": 81.439, "pct_cuda_time": 1.1628708927585412, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.439, "pct_cuda_time": 1.1628708927585412, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[6, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.812, "cuda_time_us": 8.928, "pct_cuda_time": 0.12748328602448775, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.12748328602448775, "trace": "_C::silu_and_mul(bfloat16[6, 14336], bfloat16[6, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.74, "cuda_time_us": 43.584, "pct_cuda_time": 0.6223377618829832, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.584, "pct_cuda_time": 0.6223377618829832, "trace": "mm(bfloat16[6, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[6, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[6, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.745, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type 
vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04340828735600837, "trace": "_C::fused_add_rms_norm(bfloat16[6, 4096], bfloat16[6, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 472.111, "cuda_time_us": 349.78700000000003, "pct_cuda_time": 4.994623226775033, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 4.96, "pct_cuda_time": 0.07082404779138207, "trace": "index_select(bfloat16[6, 4096], 0, int64[6])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.010966304174149483, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[6, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 344.059, "pct_cuda_time": 4.912832874809501, "trace": "mm(bfloat16[6, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[6, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[6, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 3097.422, "cuda_time_us": 116.67, "pct_cuda_time": 1.6659358177057555, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010509374833559921, "trace": "copy_(bfloat16[6], bfloat16[6], True) <- _to_copy(bfloat16[6], 15, 0, None, None, True, None) <- to(bfloat16[6], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010509374833559921, "trace": "copy_(bfloat16[6], bfloat16[6], True) <- _to_copy(bfloat16[6], 15, 0, None, None, True, None) <- to(bfloat16[6], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.011423233514739046, "trace": "copy_(int32[6], int32[6], True) <- _to_copy(int32[6], 3, 0, None, None, True, None) <- to(int32[6], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.010966304174149483, "trace": "copy_(bfloat16[6], bfloat16[6], True) <- _to_copy(bfloat16[6], 15, 0, None, None, True, None) <- to(bfloat16[6], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.010966304174149483, "trace": "copy_(bfloat16[6], bfloat16[6], True) <- _to_copy(bfloat16[6], 15, 0, None, None, True, None) <- to(bfloat16[6], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.01095202513225606, "trace": "copy_(bfloat16[6], bfloat16[6], True) <- _to_copy(bfloat16[6], 15, 0, None, None, True, None) <- to(bfloat16[6], 15, 0, None, None, 
True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.010966304174149483, "trace": "copy_(bfloat16[6], bfloat16[6], True) <- _to_copy(bfloat16[6], 15, 0, None, None, True, None) <- to(bfloat16[6], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 4.353, "pct_cuda_time": 0.06215666936207383, "trace": "copy_(float32[6, 128256], bfloat16[6, 128256], False) <- _to_copy(bfloat16[6, 128256], 6, None, None, None, False, None) <- to(bfloat16[6, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 5.536, "pct_cuda_time": 0.07904877592199418, "trace": "div_(float32[6, 128256], bfloat16[6, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 34.784, "pct_cuda_time": 0.49668219322085366, "trace": "_softmax(float32[6, 128256], -1, False) <- softmax(float32[6, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 28.255, "pct_cuda_time": 0.4034543286986896, "trace": "_log_softmax(float32[6, 128256], -1, False) <- log_softmax(float32[6, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 1.824, "pct_cuda_time": 0.02604497241360502, "trace": "copy_(int64[6], int32[6], False) <- _to_copy(int32[6], 4, None, None, None, False, None) <- to(int32[6], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, 
c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 6.24, "pct_cuda_time": 0.08910122141496456, "trace": "index(float32[6, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 27.712, "pct_cuda_time": 0.3957008089505605, "trace": "argmax(float32[6, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 2.623, "pct_cuda_time": 0.03745392688645065, "trace": "copy_(int64[6], int64[6], False) <- _to_copy(int64[6], 4, 0, None, None, False, None) <- to(int64[6], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] } }
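Post-processing note (not part of the profiler output): the dump above is a tree of {"entry": {...}, "children": [...]} nodes under each phase's "summary_stats", where leaf entries are the individual CUDA kernels and memcpys. A minimal sketch of how one might rank those leaf kernels by total CUDA time is given below. The file name "profile.json", the function names, and the phase/top-N defaults are assumptions for illustration; only the keys that actually appear in the dump ("entry", "children", "name", "cuda_time_us") are taken from the data itself.

import json
from collections import defaultdict

def collect_leaves(node, leaves):
    # Recursively gather leaf entries (actual kernels / memcpys) from a stats subtree.
    children = node.get("children", [])
    if not children:
        leaves.append(node["entry"])
    for child in children:
        collect_leaves(child, leaves)

def top_kernels(path="profile.json", phase="prefill", n=10):
    # "profile.json" is a hypothetical file name for a dump shaped like the one above.
    with open(path) as f:
        profile = json.load(f)
    totals = defaultdict(float)
    for root in profile[phase]["summary_stats"]:
        leaves = []
        collect_leaves(root, leaves)
        for entry in leaves:
            totals[entry["name"]] += entry["cuda_time_us"]
    for name, cuda_us in sorted(totals.items(), key=lambda kv: -kv[1])[:n]:
        print(f"{cuda_us:10.2f} us  {name[:80]}")

if __name__ == "__main__":
    top_kernels()

Run against a dump like this one, such a summary would surface the sm90 GEMM kernels (QKV/MLP projections and the final 4096x128256 logits matmul) and the flash-attention forward kernel as the dominant CUDA-time contributors, which matches the per-entry pct_cuda_time values recorded above.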