{ "context": { "python_version": "3.12.9 | packaged by Anaconda, Inc. | (main, Feb 6 2025, 18:56:27) [GCC 11.2.0]", "torch_version": "2.5.1+cu124", "engine_args": { "model": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "served_model_name": null, "tokenizer": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "task": "auto", "skip_tokenizer_init": false, "tokenizer_mode": "auto", "trust_remote_code": false, "allowed_local_media_path": null, "download_dir": null, "load_format": "dummy", "config_format": "auto", "dtype": "auto", "kv_cache_dtype": "auto", "seed": 0, "max_model_len": null, "distributed_executor_backend": null, "pipeline_parallel_size": 1, "tensor_parallel_size": 1, "max_parallel_loading_workers": null, "block_size": null, "enable_prefix_caching": false, "disable_sliding_window": false, "use_v2_block_manager": true, "swap_space": 4, "cpu_offload_gb": 0, "gpu_memory_utilization": 0.9, "max_num_batched_tokens": 8000, "max_num_partial_prefills": 1, "max_long_partial_prefills": 1, "long_prefill_token_threshold": 0, "max_num_seqs": 256, "max_logprobs": 20, "disable_log_stats": false, "revision": null, "code_revision": null, "rope_scaling": null, "rope_theta": null, "hf_overrides": null, "tokenizer_revision": null, "quantization": null, "enforce_eager": true, "max_seq_len_to_capture": 8192, "disable_custom_all_reduce": false, "tokenizer_pool_size": 0, "tokenizer_pool_type": "ray", "tokenizer_pool_extra_config": null, "limit_mm_per_prompt": null, "mm_processor_kwargs": null, "disable_mm_preprocessor_cache": false, "enable_lora": false, "enable_lora_bias": false, "max_loras": 1, "max_lora_rank": 16, "enable_prompt_adapter": false, "max_prompt_adapters": 1, "max_prompt_adapter_token": 0, "fully_sharded_loras": false, "lora_extra_vocab_size": 256, "long_lora_scaling_factors": null, "lora_dtype": "auto", "max_cpu_loras": null, "device": "auto", "num_scheduler_steps": 1, "multi_step_stream_outputs": true, "ray_workers_use_nsight": false, "num_gpu_blocks_override": null, "num_lookahead_slots": 0, "model_loader_extra_config": null, "ignore_patterns": [], "preemption_mode": null, "scheduler_delay_factor": 0.0, "enable_chunked_prefill": null, "guided_decoding_backend": "xgrammar", "logits_processor_pattern": null, "speculative_model": null, "speculative_model_quantization": null, "speculative_draft_tensor_parallel_size": null, "num_speculative_tokens": null, "speculative_disable_mqa_scorer": false, "speculative_max_model_len": null, "speculative_disable_by_batch_size": null, "ngram_prompt_lookup_max": null, "ngram_prompt_lookup_min": null, "spec_decoding_acceptance_method": "rejection_sampler", "typical_acceptance_sampler_posterior_threshold": null, "typical_acceptance_sampler_posterior_alpha": null, "qlora_adapter_name_or_path": null, "disable_logprobs_during_spec_decoding": null, "otlp_traces_endpoint": null, "collect_detailed_traces": null, "disable_async_output_proc": false, "scheduling_policy": "fcfs", "scheduler_cls": "vllm.core.scheduler.Scheduler", "override_neuron_config": null, "override_pooler_config": null, "compilation_config": null, "worker_cls": "auto", "kv_transfer_config": null, "generation_config": null, "override_generation_config": null, "enable_sleep_mode": false, "model_impl": "auto", "calculate_kv_scales": false, "additional_config": null }, "prompt_len": 0, "batch_size": 14, "num_steps": 2, "complete_num_requests_per_step": null, "save_chrome_traces_folder": null }, "prefill": { "metadata": { "num_running_seqs": null }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", 
"cuda_time_us": 78957.91800000002, "pct_cuda_time": 99.35157796511128, "invocations": 1 }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 103.358, "pct_cuda_time": 0.13005383950622873, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cuda_time_us": 103.358, "pct_cuda_time": 0.13005383950622873, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 78816.38400000002, "pct_cuda_time": 99.17348783062072, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 2401.0229999999997, "pct_cuda_time": 3.021171654760771, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 58.463, "pct_cuda_time": 0.0735631264058191, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 2342.56, "pct_cuda_time": 2.9476085283549525, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 17332.257999999998, "pct_cuda_time": 21.808923355836498, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 7920.378999999999, "pct_cuda_time": 9.96609550585832, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 23.87200000000001, "pct_cuda_time": 0.030037783787347797, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 7896.5070000000005, "pct_cuda_time": 9.936057722070975, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 1525.9599999999998, "pct_cuda_time": 1.9200928513799103, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 1525.9599999999998, "pct_cuda_time": 1.9200928513799103, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 2414.499, "pct_cuda_time": 3.038128305829735, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 623.863, "pct_cuda_time": 0.7849975664764641, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 1743.052, "pct_cuda_time": 2.193256497407177, "invocations": 32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 47.584, "pct_cuda_time": 0.059874241946094045, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 5471.420000000001, "pct_cuda_time": 6.884606692768534, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 23.651000000000007, "pct_cuda_time": 0.029759702762841934, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 5447.768999999999, "pct_cuda_time": 6.85484699000569, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 59083.103, "pct_cuda_time": 74.34339282002342, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 36071.62800000001, "pct_cuda_time": 45.388394886127706, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 23.810000000000006, "pct_cuda_time": 0.0299597701062647, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 36047.81799999999, "pct_cuda_time": 45.358435116021425, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 4935.549999999999, "pct_cuda_time": 6.210329414026656, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 4935.549999999999, "pct_cuda_time": 6.210329414026656, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 18075.925000000003, "pct_cuda_time": 22.744668519869077, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 24.93000000000001, "pct_cuda_time": 0.03136904950647539, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 18050.995, "pct_cuda_time": 22.713299470362596, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 38.176, "pct_cuda_time": 0.048036294984324275, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 
const*, float, int, int)", "cuda_time_us": 38.176, "pct_cuda_time": 0.048036294984324275, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 365.915, "pct_cuda_time": 0.46042542118579777, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 10.016, "pct_cuda_time": 0.012602984350455572, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "invocations": 1 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 355.163, "pct_cuda_time": 0.44689633894377523, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 149.407, "pct_cuda_time": 0.1879966137029269, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 18.112000000000002, "pct_cuda_time": 0.022790061157692824, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 7.328, "pct_cuda_time": 0.009220713789949923, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 10.368, "pct_cuda_time": 0.013045900733378931, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 37.248, "pct_cuda_time": 0.04686860633843541, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 30.048, "pct_cuda_time": 0.037808953051366716, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 1.888, "pct_cuda_time": 0.0023756424174980148, "invocations": 1 
}, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cuda_time_us": 10.784, "pct_cuda_time": 0.013569347367742901, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 30.079, "pct_cuda_time": 0.03784795989190826, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 3.552, "pct_cuda_time": 0.004469428954953893, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 84801.799, "cuda_time_us": 78957.91800000002, "pct_cuda_time": 99.35157796511128, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 286.408, "cuda_time_us": 103.358, "pct_cuda_time": 0.13005383950622873, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 103.358, "pct_cuda_time": 0.13005383950622873, "trace": "index_select(bfloat16[128256, 4096], 0, int64[3584]) <- embedding(bfloat16[128256, 4096], int64[3584], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 4051.731, "cuda_time_us": 2459.932, "pct_cuda_time": 3.0952959763563173, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 254.646, "cuda_time_us": 58.463, "pct_cuda_time": 0.0735631264058191, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 58.463, "pct_cuda_time": 0.0735631264058191, "trace": "_C::rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2961.023, "cuda_time_us": 544.565, "pct_cuda_time": 0.68521806837119, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 384.328, "cuda_time_us": 250.203, "pct_cuda_time": 0.31482672658117367, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.0009651047321085687, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 249.436, "pct_cuda_time": 0.3138616218490651, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 968.491, "cuda_time_us": 46.879, "pct_cuda_time": 0.058987150895068556, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 46.879, "pct_cuda_time": 0.058987150895068556, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1038.853, "cuda_time_us": 74.622, "pct_cuda_time": 0.09389575660939453, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.328, "pct_cuda_time": 0.024320135935064425, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.823, "pct_cuda_time": 0.06772468317637483, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.471, "pct_cuda_time": 
0.0018509374979552864, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 312.875, "cuda_time_us": 172.86100000000002, "pct_cuda_time": 0.2175084342855532, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0009248396063882632, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 172.126, "pct_cuda_time": 0.2165835946791649, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 123.384, "cuda_time_us": 36.031, "pct_cuda_time": 0.04533727327588505, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.031, "pct_cuda_time": 0.04533727327588505, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 592.86, "cuda_time_us": 1820.873, "pct_cuda_time": 2.2911775083034236, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 206.41, "cuda_time_us": 1111.6670000000001, "pct_cuda_time": 1.3987941098160839, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0009273561767457825, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1110.93, "pct_cuda_time": 1.3978667536393379, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 139.144, "cuda_time_us": 153.534, "pct_cuda_time": 0.1931895566356675, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 153.534, "pct_cuda_time": 0.1931895566356675, "trace": 
"_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 173.521, "cuda_time_us": 555.672, "pct_cuda_time": 0.6991938418516722, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 554.936, "pct_cuda_time": 0.6982677439601053, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2594.453, "cuda_time_us": 2434.8160000000003, "pct_cuda_time": 3.063692885806593, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.319, "cuda_time_us": 38.623, "pct_cuda_time": 0.04859874845922978, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.623, "pct_cuda_time": 0.04859874845922978, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1856.25, "cuda_time_us": 537.0169999999999, "pct_cuda_time": 0.6757205318419128, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 162.037, "cuda_time_us": 242.429, "pct_cuda_time": 0.305044817601497, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 241.693, "pct_cuda_time": 0.30411871970993, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 540.799, "cuda_time_us": 47.423, "pct_cuda_time": 0.05967165803231376, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.423, "pct_cuda_time": 0.05967165803231376, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 778.194, "cuda_time_us": 74.431, "pct_cuda_time": 0.09365542414025145, "trace": "" }, "children": [ { 
"entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.2, "pct_cuda_time": 0.024159075432183204, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.919, "pct_cuda_time": 0.06784547855353573, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0016508701545325192, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 213.116, "cuda_time_us": 172.734, "pct_cuda_time": 0.21734863206785074, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 171.966, "pct_cuda_time": 0.21638226905056338, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.103, "cuda_time_us": 35.744, "pct_cuda_time": 0.04497614542958106, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 35.744, "pct_cuda_time": 0.04497614542958106, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 486.154, "cuda_time_us": 1823.4320000000002, "pct_cuda_time": 2.294397460075869, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.611, "cuda_time_us": 1113.681, "pct_cuda_time": 1.4013282961661055, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1112.945, "pct_cuda_time": 1.4004021982745383, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.853, "cuda_time_us": 153.63, "pct_cuda_time": 0.19331035201282842, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 153.63, "pct_cuda_time": 0.19331035201282842, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 157.765, "cuda_time_us": 556.121, "pct_cuda_time": 0.6997588118969351, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 555.385, "pct_cuda_time": 0.6988327140053682, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2594.276, "cuda_time_us": 
2440.768, "pct_cuda_time": 3.0711821991905697, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 94.432, "cuda_time_us": 38.079, "pct_cuda_time": 0.047914241321984595, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.079, "pct_cuda_time": 0.047914241321984595, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1821.903, "cuda_time_us": 534.842, "pct_cuda_time": 0.6729837615781109, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 157.91, "cuda_time_us": 242.557, "pct_cuda_time": 0.3052058781043782, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 241.821, "pct_cuda_time": 0.30427978021281116, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 537.364, "cuda_time_us": 47.36, "pct_cuda_time": 0.059592386066051904, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.36, "pct_cuda_time": 0.059592386066051904, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 757.579, "cuda_time_us": 73.823, "pct_cuda_time": 0.09289038675156566, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.295, "pct_cuda_time": 0.024278612524165362, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.12, "pct_cuda_time": 0.06684010869570686, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.408, "pct_cuda_time": 0.001771665531693435, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 208.314, "cuda_time_us": 171.102, "pct_cuda_time": 0.21529511065611515, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 170.366, "pct_cuda_time": 0.21436901276454812, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.018, "cuda_time_us": 37.119, "pct_cuda_time": 0.04670628755037544, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.119, "pct_cuda_time": 0.04670628755037544, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 500.026, "cuda_time_us": 1830.728, "pct_cuda_time": 2.303577908740099, "trace": "" }, "children": [ { "entry": { "name": 
"MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 171.129, "cuda_time_us": 1118.9940000000001, "pct_cuda_time": 1.408013565320855, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1118.258, "pct_cuda_time": 1.407087467429288, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.886, "cuda_time_us": 153.566, "pct_cuda_time": 0.1932298217613878, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 153.566, "pct_cuda_time": 0.1932298217613878, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 155.077, "cuda_time_us": 558.168, "pct_cuda_time": 0.702334521657856, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 557.432, "pct_cuda_time": 0.701408423766289, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2539.751, "cuda_time_us": 2431.135, "pct_cuda_time": 3.059061138063579, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.438, "cuda_time_us": 37.695, "pct_cuda_time": 0.047431059813340934, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.695, "pct_cuda_time": 0.047431059813340934, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1804.457, "cuda_time_us": 531.9929999999999, "pct_cuda_time": 0.6693989071038249, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 151.618, "cuda_time_us": 243.453, "pct_cuda_time": 0.30633330162454675, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 
4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 242.717, "pct_cuda_time": 0.30540720373297975, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 501.43, "cuda_time_us": 46.879, "pct_cuda_time": 0.058987150895068556, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 46.879, "pct_cuda_time": 0.058987150895068556, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 767.278, "cuda_time_us": 74.783, "pct_cuda_time": 0.09409834052317483, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.168, "pct_cuda_time": 0.024118810306462898, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 54.111, "pct_cuda_time": 0.06808706930785756, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void 
at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.001892460908854351, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 215.359, "cuda_time_us": 166.878, "pct_cuda_time": 0.2099801140610348, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 166.142, "pct_cuda_time": 0.2090540161694678, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.476, "cuda_time_us": 35.871, "pct_cuda_time": 0.04513594764728353, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 35.871, "pct_cuda_time": 0.04513594764728353, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 502.944, "cuda_time_us": 1825.576, "pct_cuda_time": 2.297095223499129, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 175.039, "cuda_time_us": 1114.801, "pct_cuda_time": 1.402737575566316, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1114.033, "pct_cuda_time": 1.4017712125490287, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.289, "cuda_time_us": 153.182, "pct_cuda_time": 0.19274664025274413, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 153.182, "pct_cuda_time": 0.19274664025274413, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 155.09, "cuda_time_us": 557.593, "pct_cuda_time": 0.7016110076800691, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 556.857, "pct_cuda_time": 0.7006849097885022, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2497.578, "cuda_time_us": 2428.384, "pct_cuda_time": 3.0555995955368114, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.123, "cuda_time_us": 37.247, "pct_cuda_time": 0.04686734805325665, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.247, "pct_cuda_time": 0.04686734805325665, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1790.874, "cuda_time_us": 530.713, "pct_cuda_time": 0.6677883020750127, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 151.932, "cuda_time_us": 242.909, "pct_cuda_time": 0.30564879448730153, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 242.173, "pct_cuda_time": 0.30472269659573453, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 528.661, "cuda_time_us": 47.423, "pct_cuda_time": 0.05967165803231376, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.423, "pct_cuda_time": 0.05967165803231376, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, 
"children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 754.767, "cuda_time_us": 74.463, "pct_cuda_time": 0.09369568926597176, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.296, "pct_cuda_time": 0.024279870809344118, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.535, "pct_cuda_time": 0.06736229704489206, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.632, "pct_cuda_time": 0.0020535214117355723, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 201.146, "cuda_time_us": 165.91799999999998, "pct_cuda_time": 0.20877216028942566, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 
4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 165.182, "pct_cuda_time": 0.20784606239785866, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.277, "cuda_time_us": 36.287, "pct_cuda_time": 0.045659394281647496, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.287, "pct_cuda_time": 0.045659394281647496, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 467.458, "cuda_time_us": 1824.1370000000002, "pct_cuda_time": 2.295284551126895, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.843, "cuda_time_us": 1113.842, "pct_cuda_time": 1.4015308800798858, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1113.106, "pct_cuda_time": 1.4006047821883187, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.157, "cuda_time_us": 152.926, "pct_cuda_time": 0.1924245192469817, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 152.926, "pct_cuda_time": 0.1924245192469817, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.146, "cuda_time_us": 557.369, "pct_cuda_time": 0.7013291518000272, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 556.633, "pct_cuda_time": 0.7004030539084601, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- 
linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2723.794, "cuda_time_us": 2438.2709999999997, "pct_cuda_time": 3.0680402610992066, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.363, "cuda_time_us": 37.44, "pct_cuda_time": 0.047110197092757246, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.44, "pct_cuda_time": 0.047110197092757246, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1984.737, "cuda_time_us": 538.488, "pct_cuda_time": 0.6775714693398682, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 148.892, "cuda_time_us": 243.901, "pct_cuda_time": 0.30689701338463105, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 243.133, "pct_cuda_time": 0.3059306503673437, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 667.339, "cuda_time_us": 47.103, "pct_cuda_time": 0.059269006775110704, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.103, "pct_cuda_time": 0.059269006775110704, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 767.217, "cuda_time_us": 74.719, "pct_cuda_time": 0.09401781027173421, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.712, "pct_cuda_time": 0.024803317443708086, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, 
false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.727, "pct_cuda_time": 0.0676038877992139, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.0016106050288122139, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 221.699, "cuda_time_us": 172.76500000000001, "pct_cuda_time": 0.21738763890839224, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 171.997, "pct_cuda_time": 0.21642127589110494, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.032, "cuda_time_us": 35.935, "pct_cuda_time": 0.04521647789872414, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 35.935, "pct_cuda_time": 0.04521647789872414, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, 
"children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 482.027, "cuda_time_us": 1826.408, "pct_cuda_time": 2.2981421167678575, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 177.275, "cuda_time_us": 1116.369, "pct_cuda_time": 1.4047105667266109, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0009248396063882632, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1115.634, "pct_cuda_time": 1.4037857271202228, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.31, "cuda_time_us": 152.733, "pct_cuda_time": 0.19218167020748111, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 152.733, "pct_cuda_time": 0.19218167020748111, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 152.718, "cuda_time_us": 557.3059999999999, "pct_cuda_time": 0.7012498798337652, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0009273561767457825, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 556.569, "pct_cuda_time": 0.7003225236570194, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2542.913, "cuda_time_us": 2433.284, "pct_cuda_time": 3.061765192912733, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.366, "cuda_time_us": 37.408, "pct_cuda_time": 0.047069931967036946, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.408, "pct_cuda_time": 0.047069931967036946, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1838.124, "cuda_time_us": 533.818, "pct_cuda_time": 0.6716952775550611, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 160.384, "cuda_time_us": 243.03699999999998, "pct_cuda_time": 
0.30580985499018276, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 242.301, "pct_cuda_time": 0.30488375709861576, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 529.847, "cuda_time_us": 47.584, "pct_cuda_time": 0.059874241946094045, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.584, "pct_cuda_time": 0.059874241946094045, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 784.332, "cuda_time_us": 74.08, "pct_cuda_time": 0.09321376604250686, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.232, "pct_cuda_time": 0.024199340557903508, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.536, "pct_cuda_time": 0.06736355533007084, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0016508701545325192, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 209.634, "cuda_time_us": 169.117, "pct_cuda_time": 0.2127974145762774, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 168.381, "pct_cuda_time": 0.21187131668471043, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.106, "cuda_time_us": 36.192, "pct_cuda_time": 0.04553985718966534, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.192, "pct_cuda_time": 0.04553985718966534, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 471.116, "cuda_time_us": 1825.866, "pct_cuda_time": 2.2974601262009697, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.414, "cuda_time_us": 1115.4750000000001, "pct_cuda_time": 1.4035856597768002, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0009273561767457825, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1114.738, "pct_cuda_time": 1.4026583036000544, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { 
"name": "SiluAndMul", "cpu_time_us": 99.006, "cuda_time_us": 153.726, "pct_cuda_time": 0.19343114738998932, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 153.726, "pct_cuda_time": 0.19343114738998932, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 153.05, "cuda_time_us": 556.665, "pct_cuda_time": 0.7004433190341803, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.769, "pct_cuda_time": 0.0009676213024660878, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 555.896, "pct_cuda_time": 0.6994756977317143, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2440.746, "cuda_time_us": 2430.335, "pct_cuda_time": 3.0580545099205714, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.042, "cuda_time_us": 37.984, "pct_cuda_time": 0.04779470423000244, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.984, "pct_cuda_time": 0.04779470423000244, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1754.146, "cuda_time_us": 530.712, "pct_cuda_time": 0.667787043789834, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 149.671, "cuda_time_us": 243.164, "pct_cuda_time": 0.3059696572078852, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 242.428, "pct_cuda_time": 0.3050435593163182, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 527.378, "cuda_time_us": 46.847, "pct_cuda_time": 0.05894688576934826, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 46.847, 
"pct_cuda_time": 0.05894688576934826, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 734.424, "cuda_time_us": 74.71900000000001, "pct_cuda_time": 0.09401781027173421, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.328, "pct_cuda_time": 0.024320135935064425, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.887, "pct_cuda_time": 0.06780521342781543, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.001892460908854351, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 200.346, "cuda_time_us": 165.982, "pct_cuda_time": 0.20885269054086628, "trace": "" }, "children": [ { 
"entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 165.246, "pct_cuda_time": 0.20792659264929927, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.602, "cuda_time_us": 36.863, "pct_cuda_time": 0.04638416654461299, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.863, "pct_cuda_time": 0.04638416654461299, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 459.458, "cuda_time_us": 1824.776, "pct_cuda_time": 2.296088595356122, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.771, "cuda_time_us": 1112.5610000000001, "pct_cuda_time": 1.3999190167658948, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1111.825, "pct_cuda_time": 1.3989929188743278, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.416, "cuda_time_us": 154.398, "pct_cuda_time": 0.19427671503011573, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 154.398, "pct_cuda_time": 0.19427671503011573, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 152.204, "cuda_time_us": 557.817, "pct_cuda_time": 0.7018928635601114, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 557.081, 
"pct_cuda_time": 0.7009667656685443, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2529.289, "cuda_time_us": 2430.656, "pct_cuda_time": 3.0584584194629527, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.823, "cuda_time_us": 38.527, "pct_cuda_time": 0.04847795308206888, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.527, "pct_cuda_time": 0.04847795308206888, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1724.561, "cuda_time_us": 534.297, "pct_cuda_time": 0.672297996155687, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 168.739, "cuda_time_us": 244.126, "pct_cuda_time": 0.3071801275498519, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0009273561767457825, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 243.389, "pct_cuda_time": 0.30625277137310614, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 491.965, "cuda_time_us": 47.615, "pct_cuda_time": 0.059913248786635585, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.615, "pct_cuda_time": 0.059913248786635585, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 709.871, "cuda_time_us": 74.911, "pct_cuda_time": 0.09425940102605605, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.327, "pct_cuda_time": 0.02431887764988567, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, 
cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.76, "pct_cuda_time": 0.06764541121011297, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.824, "pct_cuda_time": 0.0022951121660574046, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 200.327, "cuda_time_us": 167.64499999999998, "pct_cuda_time": 0.21094521879314337, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 166.909, "pct_cuda_time": 0.21001912090157637, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.179, "cuda_time_us": 36.992, "pct_cuda_time": 0.04654648533267297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.992, 
"pct_cuda_time": 0.04654648533267297, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 579.095, "cuda_time_us": 1820.8400000000001, "pct_cuda_time": 2.2911359848925246, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.735, "cuda_time_us": 1109.777, "pct_cuda_time": 1.3964159508282281, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1109.041, "pct_cuda_time": 1.395489852936661, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.001, "cuda_time_us": 153.31, "pct_cuda_time": 0.19290770075562538, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 153.31, "pct_cuda_time": 0.19290770075562538, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 271.885, "cuda_time_us": 557.753, "pct_cuda_time": 0.7018123333086709, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 557.017, "pct_cuda_time": 0.7008862354171038, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2573.522, "cuda_time_us": 2437.088, "pct_cuda_time": 3.066551709732735, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.418, "cuda_time_us": 36.991, "pct_cuda_time": 0.04654522704749421, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.991, "pct_cuda_time": 0.04654522704749421, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1803.13, "cuda_time_us": 537.048, "pct_cuda_time": 0.6757595386824544, "trace": "" }, "children": [ { "entry": { 
"name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 151.008, "cuda_time_us": 243.80499999999998, "pct_cuda_time": 0.30677621800747007, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 243.069, "pct_cuda_time": 0.30585012011590307, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 526.105, "cuda_time_us": 46.623, "pct_cuda_time": 0.05866502988930612, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 46.623, "pct_cuda_time": 0.05866502988930612, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 748.495, "cuda_time_us": 74.015, "pct_cuda_time": 0.09313197750588749, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.232, "pct_cuda_time": 0.024199340557903508, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.503, "pct_cuda_time": 0.06732203191917177, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, 
None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.0016106050288122139, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 227.318, "cuda_time_us": 172.60500000000002, "pct_cuda_time": 0.21718631327979077, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0009248396063882632, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 171.87, "pct_cuda_time": 0.21626147367340248, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.848, "cuda_time_us": 35.968, "pct_cuda_time": 0.04525800130962321, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 35.968, "pct_cuda_time": 0.04525800130962321, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 513.057, "cuda_time_us": 1827.0810000000001, "pct_cuda_time": 2.2989889426931627, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 188.291, "cuda_time_us": 1115.0910000000001, "pct_cuda_time": 1.4031024782681565, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0009273561767457825, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1114.354, "pct_cuda_time": 1.4021751220914105, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- 
matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.815, "cuda_time_us": 153.374, "pct_cuda_time": 0.19298823100706597, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 153.374, "pct_cuda_time": 0.19298823100706597, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 162.906, "cuda_time_us": 558.616, "pct_cuda_time": 0.7028982334179402, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0009248396063882632, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 557.881, "pct_cuda_time": 0.701973393811552, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2468.511, "cuda_time_us": 2431.969, "pct_cuda_time": 3.0601105479026645, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.074, "cuda_time_us": 37.216, "pct_cuda_time": 0.04682834121271511, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.216, "pct_cuda_time": 0.04682834121271511, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1766.779, "cuda_time_us": 530.809, "pct_cuda_time": 0.6679090974521736, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 157.865, "cuda_time_us": 243.325, "pct_cuda_time": 0.3061722411216655, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 242.557, "pct_cuda_time": 0.3052058781043782, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 486.581, "cuda_time_us": 47.039, "pct_cuda_time": 0.05918847652367009, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.039, "pct_cuda_time": 0.05918847652367009, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 760.706, "cuda_time_us": 74.655, "pct_cuda_time": 0.0939372800202936, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.296, "pct_cuda_time": 0.024279870809344118, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.983, "pct_cuda_time": 0.06792600880497635, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.376, "pct_cuda_time": 0.0017314004059731294, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": 
"RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 205.22, "cuda_time_us": 165.79, "pct_cuda_time": 0.20861109978654443, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 165.054, "pct_cuda_time": 0.20768500189497743, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.486, "cuda_time_us": 36.543, "pct_cuda_time": 0.04598151528740994, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.543, "pct_cuda_time": 0.04598151528740994, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 466.547, "cuda_time_us": 1827.4010000000003, "pct_cuda_time": 2.2993915939503657, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.646, "cuda_time_us": 1115.0900000000001, "pct_cuda_time": 1.4031012199829778, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1114.354, "pct_cuda_time": 1.4021751220914105, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.525, "cuda_time_us": 153.822, "pct_cuda_time": 0.19355194276715026, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 153.822, "pct_cuda_time": 0.19355194276715026, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 151.0, "cuda_time_us": 558.489, "pct_cuda_time": 0.7027384312002378, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.769, "pct_cuda_time": 0.0009676213024660878, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { 
"name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 557.72, "pct_cuda_time": 0.7017708098977717, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2571.706, "cuda_time_us": 2440.385, "pct_cuda_time": 3.0707002759671047, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.081, "cuda_time_us": 38.143, "pct_cuda_time": 0.04799477157342521, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.143, "pct_cuda_time": 0.04799477157342521, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1837.28, "cuda_time_us": 538.329, "pct_cuda_time": 0.6773714019964454, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.788, "cuda_time_us": 244.60399999999998, "pct_cuda_time": 0.30778158786529897, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 243.868, "pct_cuda_time": 0.30685548997373197, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 498.878, "cuda_time_us": 48.064, "pct_cuda_time": 0.06047821883189862, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 48.064, "pct_cuda_time": 0.06047821883189862, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 728.28, "cuda_time_us": 74.399, "pct_cuda_time": 0.09361515901453116, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.392, "pct_cuda_time": 0.024400666186505035, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], 
bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.504, "pct_cuda_time": 0.06732329020435053, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.503, "pct_cuda_time": 0.0018912026236755912, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 222.158, "cuda_time_us": 171.262, "pct_cuda_time": 0.21549643628471665, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 170.526, "pct_cuda_time": 0.21457033839314965, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.16, "cuda_time_us": 37.408, "pct_cuda_time": 0.047069931967036946, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.408, "pct_cuda_time": 0.047069931967036946, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 474.819, "cuda_time_us": 1826.505, "pct_cuda_time": 2.298264170430197, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.103, "cuda_time_us": 1115.0890000000002, "pct_cuda_time": 1.403099961697799, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1114.353, "pct_cuda_time": 1.402173863806232, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.747, "cuda_time_us": 153.855, "pct_cuda_time": 0.19359346617804932, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 153.855, "pct_cuda_time": 0.19359346617804932, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 151.847, "cuda_time_us": 557.561, "pct_cuda_time": 0.7015707425543489, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 556.793, "pct_cuda_time": 0.7006043795370617, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2744.755, "cuda_time_us": 2441.729, "pct_cuda_time": 3.072391411247357, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.353, "cuda_time_us": 38.399, "pct_cuda_time": 0.04831689257918765, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.399, "pct_cuda_time": 0.04831689257918765, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], 
bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2018.529, "cuda_time_us": 538.5559999999999, "pct_cuda_time": 0.6776570327320238, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 151.283, "cuda_time_us": 243.83599999999998, "pct_cuda_time": 0.30681522484801166, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.0009651047321085687, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 243.069, "pct_cuda_time": 0.30585012011590307, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 529.364, "cuda_time_us": 47.584, "pct_cuda_time": 0.059874241946094045, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.584, "pct_cuda_time": 0.059874241946094045, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 969.621, "cuda_time_us": 74.20899999999999, "pct_cuda_time": 0.09337608483056682, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.136, "pct_cuda_time": 0.02407854518074259, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 
0, "cuda_time_us": 53.568, "pct_cuda_time": 0.06740382045579113, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.505, "pct_cuda_time": 0.0018937191940331104, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 213.583, "cuda_time_us": 172.927, "pct_cuda_time": 0.21759148110735127, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0009273561767457825, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 172.19, "pct_cuda_time": 0.21666412493060555, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 102.95, "cuda_time_us": 35.839, "pct_cuda_time": 0.04509568252156322, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 35.839, "pct_cuda_time": 0.04509568252156322, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 472.152, "cuda_time_us": 1828.935, "pct_cuda_time": 2.3013218034145826, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.962, "cuda_time_us": 1116.2730000000001, "pct_cuda_time": 1.4045897713494504, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1115.537, "pct_cuda_time": 1.4036636734578831, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.157, "cuda_time_us": 153.214, "pct_cuda_time": 0.19278690537846446, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 153.214, "pct_cuda_time": 0.19278690537846446, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 152.477, "cuda_time_us": 559.448, "pct_cuda_time": 0.7039451266866682, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.888, "pct_cuda_time": 0.0023756424174980148, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 557.56, "pct_cuda_time": 0.7015694842691701, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2409.999, "cuda_time_us": 2430.142, "pct_cuda_time": 3.0578116608810704, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.695, "cuda_time_us": 37.471, "pct_cuda_time": 0.047149203933298786, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.471, "pct_cuda_time": 0.047149203933298786, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1742.124, "cuda_time_us": 531.737, "pct_cuda_time": 0.6690767860980624, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 150.124, "cuda_time_us": 243.613, "pct_cuda_time": 0.3065346272531483, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 242.877, "pct_cuda_time": 0.3056085293615813, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], 
bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 522.699, "cuda_time_us": 47.135, "pct_cuda_time": 0.059309271900831004, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.135, "pct_cuda_time": 0.059309271900831004, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 720.4, "cuda_time_us": 74.431, "pct_cuda_time": 0.09365542414025145, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.616, "pct_cuda_time": 0.024682522066547172, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.343, "pct_cuda_time": 0.06712070629057025, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0018521957831340457, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, 
True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 197.623, "cuda_time_us": 166.558, "pct_cuda_time": 0.2095774628038318, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 165.822, "pct_cuda_time": 0.20865136491226474, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.81, "cuda_time_us": 36.543, "pct_cuda_time": 0.04598151528740994, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.543, "pct_cuda_time": 0.04598151528740994, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 448.779, "cuda_time_us": 1824.391, "pct_cuda_time": 2.2956041555622995, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.011, "cuda_time_us": 1112.977, "pct_cuda_time": 1.4004424634002588, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.0009651047321085687, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1112.21, "pct_cuda_time": 1.3994773586681502, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 91.563, "cuda_time_us": 152.958, "pct_cuda_time": 0.192464784372702, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 152.958, "pct_cuda_time": 0.192464784372702, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.036, "cuda_time_us": 558.456, "pct_cuda_time": 0.7026969077893387, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, 
"trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 557.72, "pct_cuda_time": 0.7017708098977717, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2560.612, "cuda_time_us": 2422.593, "pct_cuda_time": 3.048312866066615, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.514, "cuda_time_us": 37.184, "pct_cuda_time": 0.046788076086994805, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.184, "pct_cuda_time": 0.046788076086994805, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1840.23, "cuda_time_us": 526.073, "pct_cuda_time": 0.6619498588455685, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 150.521, "cuda_time_us": 241.789, "pct_cuda_time": 0.30423951508709085, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 241.053, "pct_cuda_time": 0.30331341719552385, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 486.73, "cuda_time_us": 46.655, "pct_cuda_time": 0.05870529501502643, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 46.655, "pct_cuda_time": 0.05870529501502643, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 823.657, "cuda_time_us": 73.05499999999999, "pct_cuda_time": 0.09192402373427833, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.04, "pct_cuda_time": 0.023957749803581677, "trace": 
"_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 52.543, "pct_cuda_time": 0.06611407814756261, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.0018521957831340457, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 231.098, "cuda_time_us": 164.57399999999998, "pct_cuda_time": 0.20708102500917283, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 163.838, "pct_cuda_time": 0.20615492711760583, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], 
None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.014, "cuda_time_us": 36.064, "pct_cuda_time": 0.04537879668678412, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.064, "pct_cuda_time": 0.04537879668678412, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 487.448, "cuda_time_us": 1823.272, "pct_cuda_time": 2.2941961344472674, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 171.806, "cuda_time_us": 1113.201, "pct_cuda_time": 1.400724319280301, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1112.465, "pct_cuda_time": 1.3997982213887337, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.294, "cuda_time_us": 153.566, "pct_cuda_time": 0.1932298217613878, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 153.566, "pct_cuda_time": 0.1932298217613878, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 156.035, "cuda_time_us": 556.505, "pct_cuda_time": 0.7002419934055789, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 555.769, "pct_cuda_time": 0.6993158955140119, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2508.015, "cuda_time_us": 2429.536, "pct_cuda_time": 3.0570491400627424, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.373, "cuda_time_us": 38.272, "pct_cuda_time": 0.04815709036148518, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, 
c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.272, "pct_cuda_time": 0.04815709036148518, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1730.322, "cuda_time_us": 530.361, "pct_cuda_time": 0.6673453856920893, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 158.083, "cuda_time_us": 243.581, "pct_cuda_time": 0.306494362127428, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 242.845, "pct_cuda_time": 0.305568264235861, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 501.517, "cuda_time_us": 47.007, "pct_cuda_time": 0.05914821139794979, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.007, "pct_cuda_time": 0.05914821139794979, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 715.467, "cuda_time_us": 73.759, "pct_cuda_time": 0.09280985650012505, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.328, "pct_cuda_time": 0.024320135935064425, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> 
>, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 52.927, "pct_cuda_time": 0.06659725965620628, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.001892460908854351, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 200.367, "cuda_time_us": 166.01399999999998, "pct_cuda_time": 0.20889295566658656, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 165.278, "pct_cuda_time": 0.20796685777501955, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.09, "cuda_time_us": 36.991, "pct_cuda_time": 0.04654522704749421, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.991, "pct_cuda_time": 0.04654522704749421, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 522.663, "cuda_time_us": 1823.912, "pct_cuda_time": 2.2950014369616736, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.999, "cuda_time_us": 1114.257, "pct_cuda_time": 1.4020530684290708, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], 
bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1113.489, "pct_cuda_time": 1.4010867054117835, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 135.344, "cuda_time_us": 153.31, "pct_cuda_time": 0.19290770075562538, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 153.31, "pct_cuda_time": 0.19290770075562538, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 173.411, "cuda_time_us": 556.345, "pct_cuda_time": 0.7000406677769774, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 555.577, "pct_cuda_time": 0.69907430475969, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2535.721, "cuda_time_us": 2435.5240000000003, "pct_cuda_time": 3.0645837517131547, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.994, "cuda_time_us": 38.432, "pct_cuda_time": 0.048358415990086716, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.432, "pct_cuda_time": 0.048358415990086716, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1782.034, "cuda_time_us": 531.804, "pct_cuda_time": 0.6691610912050394, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 150.837, "cuda_time_us": 242.654, "pct_cuda_time": 0.3053279317667179, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.769, "pct_cuda_time": 0.0009676213024660878, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 241.885, "pct_cuda_time": 0.30436031046425177, "trace": 
"mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 492.385, "cuda_time_us": 47.168, "pct_cuda_time": 0.05935079531173008, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.168, "pct_cuda_time": 0.05935079531173008, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 760.723, "cuda_time_us": 74.07900000000001, "pct_cuda_time": 0.09321250775732812, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.231, "pct_cuda_time": 0.02419808227272475, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.088, "pct_cuda_time": 0.06679984356998656, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.76, "pct_cuda_time": 0.002214581914616794, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, 
bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 225.56, "cuda_time_us": 167.903, "pct_cuda_time": 0.21126985636926335, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0009273561767457825, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 167.166, "pct_cuda_time": 0.21034250019251757, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.766, "cuda_time_us": 36.896, "pct_cuda_time": 0.04642568995551206, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.896, "pct_cuda_time": 0.04642568995551206, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 515.285, "cuda_time_us": 1828.3920000000003, "pct_cuda_time": 2.3006385545625165, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 171.354, "cuda_time_us": 1117.1370000000002, "pct_cuda_time": 1.4056769297438985, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1116.401, "pct_cuda_time": 1.4047508318523314, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.369, "cuda_time_us": 153.502, "pct_cuda_time": 0.1931492915099472, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 153.502, "pct_cuda_time": 0.1931492915099472, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 176.46, "cuda_time_us": 557.753, "pct_cuda_time": 
0.7018123333086709, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 556.985, "pct_cuda_time": 0.7008459702913835, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2493.806, "cuda_time_us": 2427.809, "pct_cuda_time": 3.054876081559025, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.798, "cuda_time_us": 38.4, "pct_cuda_time": 0.04831815086436641, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.4, "pct_cuda_time": 0.04831815086436641, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1753.955, "cuda_time_us": 529.977, "pct_cuda_time": 0.6668622041834457, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 154.06, "cuda_time_us": 242.36499999999998, "pct_cuda_time": 0.3049642873500564, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 241.629, "pct_cuda_time": 0.30403818945848937, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 494.59, "cuda_time_us": 47.2, "pct_cuda_time": 0.05939106043745038, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.2, "pct_cuda_time": 0.05939106043745038, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 742.643, "cuda_time_us": 73.887, "pct_cuda_time": 0.09297091700300628, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float 
const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.199, "pct_cuda_time": 0.024157817147004445, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.408, "pct_cuda_time": 0.06720249482718961, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.0016106050288122139, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 206.162, "cuda_time_us": 166.52499999999998, "pct_cuda_time": 0.2095359393929327, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 165.789, "pct_cuda_time": 0.2086098415013657, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) 
<- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.542, "cuda_time_us": 36.032, "pct_cuda_time": 0.04533853156106381, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.032, "pct_cuda_time": 0.04533853156106381, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 486.786, "cuda_time_us": 1823.4, "pct_cuda_time": 2.2943571949501487, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.575, "cuda_time_us": 1113.074, "pct_cuda_time": 1.4005645170625984, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1112.338, "pct_cuda_time": 1.3996384191710314, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 106.743, "cuda_time_us": 152.638, "pct_cuda_time": 0.19206213311549897, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 152.638, "pct_cuda_time": 0.19206213311549897, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 156.012, "cuda_time_us": 557.688, "pct_cuda_time": 0.7017305447720514, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 556.952, "pct_cuda_time": 0.7008044468804844, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2468.642, "cuda_time_us": 2440.993, "pct_cuda_time": 3.07146531335579, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.858, "cuda_time_us": 37.344, "pct_cuda_time": 0.04698940171559633, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.344, "pct_cuda_time": 0.04698940171559633, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1707.143, "cuda_time_us": 530.585, "pct_cuda_time": 0.6676272415721316, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 151.951, "cuda_time_us": 243.101, "pct_cuda_time": 0.3058903852416234, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 242.333, "pct_cuda_time": 0.30492402222433607, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 474.982, "cuda_time_us": 47.039, "pct_cuda_time": 0.05918847652367009, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.039, "pct_cuda_time": 0.05918847652367009, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 720.138, "cuda_time_us": 74.111, "pct_cuda_time": 0.09325277288304842, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.36, "pct_cuda_time": 0.02436040106078473, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, 
true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 53.439, "pct_cuda_time": 0.06724150166773116, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0016508701545325192, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 211.866, "cuda_time_us": 166.334, "pct_cuda_time": 0.20929560692378968, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 165.598, "pct_cuda_time": 0.20836950903222262, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 112.055, "cuda_time_us": 35.968, "pct_cuda_time": 0.04525800130962321, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 35.968, "pct_cuda_time": 0.04525800130962321, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 500.187, "cuda_time_us": 1837.096, "pct_cuda_time": 2.3115906687584395, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 188.831, "cuda_time_us": 1113.905, "pct_cuda_time": 1.4016101520461475, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 
0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1113.137, "pct_cuda_time": 1.40064378902886, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.55, "cuda_time_us": 152.766, "pct_cuda_time": 0.19222319361838017, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 152.766, "pct_cuda_time": 0.19222319361838017, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 155.185, "cuda_time_us": 570.4250000000001, "pct_cuda_time": 0.7177573230939118, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 569.657, "pct_cuda_time": 0.7167909600766245, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2830.089, "cuda_time_us": 2514.239, "pct_cuda_time": 3.163629669559212, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.211, "cuda_time_us": 37.407, "pct_cuda_time": 0.04706867368185818, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.407, "pct_cuda_time": 0.04706867368185818, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2119.455, "cuda_time_us": 558.7139999999999, "pct_cuda_time": 0.7030215453654587, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 169.544, "cuda_time_us": 253.213, "pct_cuda_time": 0.3186141649692399, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 252.477, "pct_cuda_time": 0.3176880670776729, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 536.678, "cuda_time_us": 48.511, "pct_cuda_time": 0.06104067230680414, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 48.511, "pct_cuda_time": 0.06104067230680414, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1038.557, "cuda_time_us": 76.92699999999999, "pct_cuda_time": 0.09679610394643527, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.616, "pct_cuda_time": 0.024682522066547172, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.775, "pct_cuda_time": 0.07018085584531344, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.536, 
"pct_cuda_time": 0.0019327260345746566, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 220.939, "cuda_time_us": 180.063, "pct_cuda_time": 0.2265706041429794, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0009273561767457825, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 179.326, "pct_cuda_time": 0.22564324796623358, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.397, "cuda_time_us": 36.639, "pct_cuda_time": 0.046102310664570864, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.639, "pct_cuda_time": 0.046102310664570864, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 469.187, "cuda_time_us": 1881.479, "pct_cuda_time": 2.367437139847324, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.219, "cuda_time_us": 1151.729, "pct_cuda_time": 1.4492035306475484, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1150.993, "pct_cuda_time": 1.4482774327559813, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.498, "cuda_time_us": 155.998, "pct_cuda_time": 0.19628997131613102, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 155.998, "pct_cuda_time": 0.19628997131613102, "trace": 
"_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 148.805, "cuda_time_us": 573.752, "pct_cuda_time": 0.7219436378836446, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 573.016, "pct_cuda_time": 0.7210175399920776, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2532.447, "cuda_time_us": 2504.1919999999996, "pct_cuda_time": 3.150987678368214, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.089, "cuda_time_us": 39.136, "pct_cuda_time": 0.04924424875593343, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 39.136, "pct_cuda_time": 0.04924424875593343, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1814.469, "cuda_time_us": 552.2819999999999, "pct_cuda_time": 0.6949282550956772, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 151.592, "cuda_time_us": 252.796, "pct_cuda_time": 0.3180894600496971, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 252.028, "pct_cuda_time": 0.3171230970324098, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 509.437, "cuda_time_us": 48.896, "pct_cuda_time": 0.061525112100626556, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 48.896, "pct_cuda_time": 0.061525112100626556, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 768.865, "cuda_time_us": 77.503, "pct_cuda_time": 0.09752087620940078, "trace": "" }, "children": [ { 
"entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.775, "pct_cuda_time": 0.024882589409969937, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.064, "pct_cuda_time": 0.07054450026197495, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.664, "pct_cuda_time": 0.0020937865374558774, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 229.433, "cuda_time_us": 173.087, "pct_cuda_time": 0.2177928067359528, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0009273561767457825, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 172.35, "pct_cuda_time": 0.21686545055920703, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.716, "cuda_time_us": 36.992, "pct_cuda_time": 0.04654648533267297, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.992, "pct_cuda_time": 0.04654648533267297, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 478.163, "cuda_time_us": 1875.7819999999997, "pct_cuda_time": 2.3602686891839304, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.179, "cuda_time_us": 1147.7279999999998, "pct_cuda_time": 1.4441691316473313, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.0009248396063882632, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1146.993, "pct_cuda_time": 1.4432442920409432, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.328, "cuda_time_us": 155.454, "pct_cuda_time": 0.19560546417888583, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 155.454, "pct_cuda_time": 0.19560546417888583, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 155.096, "cuda_time_us": 572.6, "pct_cuda_time": 0.7204940933577137, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 571.864, "pct_cuda_time": 0.7195679954661467, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2491.145, 
"cuda_time_us": 2508.8959999999997, "pct_cuda_time": 3.1569066518490994, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.098, "cuda_time_us": 38.079, "pct_cuda_time": 0.047914241321984595, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.079, "pct_cuda_time": 0.047914241321984595, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1791.01, "cuda_time_us": 552.3449999999999, "pct_cuda_time": 0.695007527061939, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 172.224, "cuda_time_us": 253.85299999999998, "pct_cuda_time": 0.31941946748364597, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 253.117, "pct_cuda_time": 0.3184933695920789, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 553.141, "cuda_time_us": 48.511, "pct_cuda_time": 0.06104067230680414, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 48.511, "pct_cuda_time": 0.06104067230680414, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 712.645, "cuda_time_us": 76.991, "pct_cuda_time": 0.0968766341978759, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 20.096, "pct_cuda_time": 0.025286498952351754, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, 
cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.455, "pct_cuda_time": 0.06977820458811039, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.0018119306574137402, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 200.075, "cuda_time_us": 172.98999999999998, "pct_cuda_time": 0.2176707530736131, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 172.254, "pct_cuda_time": 0.21674465518204614, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.115, "cuda_time_us": 35.552, "pct_cuda_time": 0.04473455467525923, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 35.552, "pct_cuda_time": 0.04473455467525923, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 467.095, "cuda_time_us": 1882.92, "pct_cuda_time": 
2.3692503287899167, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.603, "cuda_time_us": 1149.3300000000002, "pct_cuda_time": 1.4461849045037045, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0009273561767457825, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1148.593, "pct_cuda_time": 1.4452575483269587, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.121, "cuda_time_us": 155.806, "pct_cuda_time": 0.1960483805618092, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 155.806, "pct_cuda_time": 0.1960483805618092, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 151.88, "cuda_time_us": 577.784, "pct_cuda_time": 0.7270170437244031, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 577.048, "pct_cuda_time": 0.7260909458328362, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2434.587, "cuda_time_us": 2500.35, "pct_cuda_time": 3.14615334671142, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.482, "cuda_time_us": 38.559, "pct_cuda_time": 0.04851821820778918, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.559, "pct_cuda_time": 0.04851821820778918, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1758.238, "cuda_time_us": 552.184, "pct_cuda_time": 0.6948049431481589, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 173.836, "cuda_time_us": 254.013, "pct_cuda_time": 0.3196207931122475, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, 
"pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 253.245, "pct_cuda_time": 0.3186544300949602, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 540.433, "cuda_time_us": 48.543, "pct_cuda_time": 0.06108093743252444, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 48.543, "pct_cuda_time": 0.06108093743252444, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 700.532, "cuda_time_us": 76.959, "pct_cuda_time": 0.09683636907215559, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.551, "pct_cuda_time": 0.0246007335299278, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.904, "pct_cuda_time": 0.07034317463337343, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, 
"children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.001892460908854351, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 197.76, "cuda_time_us": 172.66899999999998, "pct_cuda_time": 0.21726684353123132, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 171.933, "pct_cuda_time": 0.21634074563966432, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.965, "cuda_time_us": 37.024, "pct_cuda_time": 0.04658675045839328, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.024, "pct_cuda_time": 0.04658675045839328, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 455.525, "cuda_time_us": 1872.583, "pct_cuda_time": 2.3562434348970793, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.955, "cuda_time_us": 1142.449, "pct_cuda_time": 1.43752664418866, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1141.681, "pct_cuda_time": 1.4365602811713725, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.082, "cuda_time_us": 156.158, "pct_cuda_time": 0.19649129694473255, "trace": "" }, "children": [ { 
"entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 156.158, "pct_cuda_time": 0.19649129694473255, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.049, "cuda_time_us": 573.976, "pct_cuda_time": 0.7222254937636868, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 573.24, "pct_cuda_time": 0.7212993958721198, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2576.813, "cuda_time_us": 2518.1130000000003, "pct_cuda_time": 3.168504266341727, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.505, "cuda_time_us": 37.792, "pct_cuda_time": 0.04755311347568061, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.792, "pct_cuda_time": 0.04755311347568061, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1855.644, "cuda_time_us": 559.706, "pct_cuda_time": 0.7042697642627882, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 170.379, "cuda_time_us": 253.50099999999998, "pct_cuda_time": 0.3189765511007226, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 252.765, "pct_cuda_time": 0.3180504532091556, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 531.839, "cuda_time_us": 47.935, "pct_cuda_time": 0.06031590004383865, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.935, "pct_cuda_time": 0.06031590004383865, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 
1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 781.076, "cuda_time_us": 78.24000000000001, "pct_cuda_time": 0.09844823238614657, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 20.096, "pct_cuda_time": 0.025286498952351754, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.447, "pct_cuda_time": 0.07102642348543987, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.697, "pct_cuda_time": 0.0021353099483549425, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 214.694, "cuda_time_us": 180.03, "pct_cuda_time": 0.2265290807320803, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": 
"mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 179.294, "pct_cuda_time": 0.22560298284051333, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.596, "cuda_time_us": 36.351, "pct_cuda_time": 0.04573992453308811, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.351, "pct_cuda_time": 0.04573992453308811, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 479.082, "cuda_time_us": 1884.2640000000001, "pct_cuda_time": 2.3709414640701696, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.532, "cuda_time_us": 1155.025, "pct_cuda_time": 1.45335083859674, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1154.289, "pct_cuda_time": 1.4524247407051727, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.839, "cuda_time_us": 155.582, "pct_cuda_time": 0.19576652468176703, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 155.582, "pct_cuda_time": 0.19576652468176703, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 153.45, "cuda_time_us": 573.657, "pct_cuda_time": 0.7218241007916626, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 572.921, "pct_cuda_time": 0.7208980029000955, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], 
bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2429.602, "cuda_time_us": 2502.4620000000004, "pct_cuda_time": 3.148810845008961, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.02, "cuda_time_us": 38.079, "pct_cuda_time": 0.047914241321984595, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.079, "pct_cuda_time": 0.047914241321984595, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1746.491, "cuda_time_us": 551.577, "pct_cuda_time": 0.6940411640446519, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 160.028, "cuda_time_us": 253.085, "pct_cuda_time": 0.31845310446635866, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 252.317, "pct_cuda_time": 0.31748674144907135, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 507.875, "cuda_time_us": 48.447, "pct_cuda_time": 0.06096014205536353, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 48.447, "pct_cuda_time": 0.06096014205536353, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 725.368, "cuda_time_us": 76.767, "pct_cuda_time": 0.09659477831783375, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.424, "pct_cuda_time": 0.02444093131222534, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, 
true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.615, "pct_cuda_time": 0.06997953021671192, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.728, "pct_cuda_time": 0.0021743167888964884, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 195.102, "cuda_time_us": 173.278, "pct_cuda_time": 0.21803313920509587, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 172.51, "pct_cuda_time": 0.21706677618780856, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.442, "cuda_time_us": 36.575, "pct_cuda_time": 0.046021780413130244, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.575, "pct_cuda_time": 0.046021780413130244, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 
1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 458.786, "cuda_time_us": 1876.2310000000002, "pct_cuda_time": 2.3608336592291943, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.856, "cuda_time_us": 1142.4170000000001, "pct_cuda_time": 1.4374863790629397, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1141.681, "pct_cuda_time": 1.4365602811713725, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.597, "cuda_time_us": 155.39, "pct_cuda_time": 0.1955249339274452, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 155.39, "pct_cuda_time": 0.1955249339274452, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 148.014, "cuda_time_us": 578.424, "pct_cuda_time": 0.7278223462388093, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 577.688, "pct_cuda_time": 0.7268962483472422, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2392.861, "cuda_time_us": 2498.4950000000003, "pct_cuda_time": 3.1438192277048223, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.502, "cuda_time_us": 37.471, "pct_cuda_time": 0.047149203933298786, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.471, "pct_cuda_time": 0.047149203933298786, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1705.416, "cuda_time_us": 550.265, "pct_cuda_time": 0.6923902938901193, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 150.147, "cuda_time_us": 252.541, 
"pct_cuda_time": 0.31776859732911344, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 251.805, "pct_cuda_time": 0.3168424994375465, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 500.598, "cuda_time_us": 47.807, "pct_cuda_time": 0.060154839540957426, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 47.807, "pct_cuda_time": 0.060154839540957426, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 717.205, "cuda_time_us": 77.055, "pct_cuda_time": 0.09695716444931651, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.68, "pct_cuda_time": 0.024763052317987783, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.871, "pct_cuda_time": 0.07030165122247437, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 
0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.001892460908854351, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 198.63, "cuda_time_us": 172.862, "pct_cuda_time": 0.21750969257073194, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 172.126, "pct_cuda_time": 0.2165835946791649, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.022, "cuda_time_us": 36.383, "pct_cuda_time": 0.045780189658808416, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.383, "pct_cuda_time": 0.045780189658808416, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 455.642, "cuda_time_us": 1874.3760000000002, "pct_cuda_time": 2.3584995402225957, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.524, "cuda_time_us": 1142.1930000000002, "pct_cuda_time": 1.4372045231828976, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1141.457, "pct_cuda_time": 1.4362784252913305, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] 
}, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.666, "cuda_time_us": 155.294, "pct_cuda_time": 0.19540413855028432, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 155.294, "pct_cuda_time": 0.19540413855028432, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.881, "cuda_time_us": 576.889, "pct_cuda_time": 0.7258908784894134, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 576.153, "pct_cuda_time": 0.7249647805978463, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2762.297, "cuda_time_us": 2506.622, "pct_cuda_time": 3.1540453113526006, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.206, "cuda_time_us": 38.56, "pct_cuda_time": 0.048519476492967936, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.56, "pct_cuda_time": 0.048519476492967936, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2050.778, "cuda_time_us": 553.752, "pct_cuda_time": 0.6967779343084538, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 157.9, "cuda_time_us": 253.69299999999998, "pct_cuda_time": 0.31921814185504443, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 252.957, "pct_cuda_time": 0.31829204396347743, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 562.454, "cuda_time_us": 48.671, "pct_cuda_time": 0.06124199793540566, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, 
"cuda_time_us": 48.671, "pct_cuda_time": 0.06124199793540566, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 932.74, "cuda_time_us": 76.863, "pct_cuda_time": 0.09671557369499466, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.648, "pct_cuda_time": 0.02472278719226748, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.871, "pct_cuda_time": 0.07030165122247437, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0016911352802528245, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 251.419, "cuda_time_us": 174.52499999999998, "pct_cuda_time": 0.219602220823009, "trace": "" }, 
"children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 173.789, "pct_cuda_time": 0.218676122931442, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.938, "cuda_time_us": 36.448, "pct_cuda_time": 0.04586197819542778, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.448, "pct_cuda_time": 0.04586197819542778, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 481.026, "cuda_time_us": 1877.8619999999999, "pct_cuda_time": 2.362885922355751, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 169.659, "cuda_time_us": 1149.904, "pct_cuda_time": 1.4469071601963122, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1149.136, "pct_cuda_time": 1.445940797179025, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.108, "cuda_time_us": 155.581, "pct_cuda_time": 0.19576526639658826, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 155.581, "pct_cuda_time": 0.19576526639658826, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.031, "cuda_time_us": 572.377, "pct_cuda_time": 0.7202134957628502, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, 
"cuda_time_us": 571.641, "pct_cuda_time": 0.7192873978712833, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2489.849, "cuda_time_us": 2504.5170000000003, "pct_cuda_time": 3.151396621051312, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.116, "cuda_time_us": 38.08, "pct_cuda_time": 0.047915499607163355, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.08, "pct_cuda_time": 0.047915499607163355, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1797.87, "cuda_time_us": 553.403, "pct_cuda_time": 0.6963387927810668, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 149.838, "cuda_time_us": 254.237, "pct_cuda_time": 0.31990264899228965, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 253.469, "pct_cuda_time": 0.31893628597500234, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 553.505, "cuda_time_us": 48.16, "pct_cuda_time": 0.06059901420905953, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 48.16, "pct_cuda_time": 0.06059901420905953, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 739.181, "cuda_time_us": 77.18299999999999, "pct_cuda_time": 0.09711822495219771, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.647, "pct_cuda_time": 0.02472152890708872, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, 
cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.0, "pct_cuda_time": 0.07046397001053434, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.536, "pct_cuda_time": 0.0019327260345746566, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 209.97, "cuda_time_us": 173.823, "pct_cuda_time": 0.21871890462751986, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0009273561767457825, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 173.086, "pct_cuda_time": 0.21779154845077406, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.497, "cuda_time_us": 36.832, "pct_cuda_time": 0.04634515970407145, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, 
"cuda_time_us": 36.832, "pct_cuda_time": 0.04634515970407145, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 466.778, "cuda_time_us": 1876.2020000000002, "pct_cuda_time": 2.36079716895901, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.391, "cuda_time_us": 1148.275, "pct_cuda_time": 1.444857413640113, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0009273561767457825, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1147.538, "pct_cuda_time": 1.4439300574633673, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.381, "cuda_time_us": 155.102, "pct_cuda_time": 0.19516254779596248, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 155.102, "pct_cuda_time": 0.19516254779596248, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.651, "cuda_time_us": 572.825, "pct_cuda_time": 0.7207772075229346, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 572.089, "pct_cuda_time": 0.7198511096313676, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2461.478, "cuda_time_us": 2503.679, "pct_cuda_time": 3.1503421780715115, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.713, "cuda_time_us": 37.663, "pct_cuda_time": 0.04739079468762062, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.663, "pct_cuda_time": 0.04739079468762062, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1754.755, "cuda_time_us": 550.905, "pct_cuda_time": 0.6931955964045254, "trace": "" }, 
"children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.097, "cuda_time_us": 251.99699999999999, "pct_cuda_time": 0.31708409019186823, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 251.261, "pct_cuda_time": 0.3161579923003012, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 514.42, "cuda_time_us": 48.447, "pct_cuda_time": 0.06096014205536353, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 48.447, "pct_cuda_time": 0.06096014205536353, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 726.899, "cuda_time_us": 77.343, "pct_cuda_time": 0.09731955058079926, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.744, "pct_cuda_time": 0.024843582569428396, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.871, "pct_cuda_time": 0.07030165122247437, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], 
int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.728, "pct_cuda_time": 0.0021743167888964884, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 223.092, "cuda_time_us": 173.118, "pct_cuda_time": 0.2178318135764944, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 172.382, "pct_cuda_time": 0.21690571568492734, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.414, "cuda_time_us": 36.831, "pct_cuda_time": 0.04634390141889269, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.831, "pct_cuda_time": 0.04634390141889269, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 475.743, "cuda_time_us": 1878.2800000000002, "pct_cuda_time": 2.3634118855604727, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 166.935, "cuda_time_us": 1146.258, "pct_cuda_time": 1.442319452434555, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1145.49, "pct_cuda_time": 1.4413530894172677, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- 
matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.657, "cuda_time_us": 155.325, "pct_cuda_time": 0.19544314539082583, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 155.325, "pct_cuda_time": 0.19544314539082583, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.295, "cuda_time_us": 576.697, "pct_cuda_time": 0.7256492877350915, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 575.961, "pct_cuda_time": 0.7247231898435246, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2546.448, "cuda_time_us": 2496.542, "pct_cuda_time": 3.1413617967507044, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.2, "cuda_time_us": 38.847, "pct_cuda_time": 0.048880604339271924, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.847, "pct_cuda_time": 0.048880604339271924, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1830.538, "cuda_time_us": 549.9449999999999, "pct_cuda_time": 0.6919876426329162, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 150.307, "cuda_time_us": 252.31699999999998, "pct_cuda_time": 0.3174867414490713, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 251.581, "pct_cuda_time": 0.3165606435575043, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 510.466, "cuda_time_us": 48.607, "pct_cuda_time": 0.061161467683965055, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 48.607, "pct_cuda_time": 0.061161467683965055, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 816.578, "cuda_time_us": 76.67099999999999, "pct_cuda_time": 0.09647398294067283, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.84, "pct_cuda_time": 0.02496437794658931, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.519, "pct_cuda_time": 0.069858734839551, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0016508701545325192, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": 
"RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 205.438, "cuda_time_us": 172.35, "pct_cuda_time": 0.21686545055920703, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 171.614, "pct_cuda_time": 0.21593935266764003, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.737, "cuda_time_us": 37.183, "pct_cuda_time": 0.046786817801816045, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.183, "pct_cuda_time": 0.046786817801816045, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 485.701, "cuda_time_us": 1870.567, "pct_cuda_time": 2.3537067319767, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 183.794, "cuda_time_us": 1142.545, "pct_cuda_time": 1.437647439565821, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1141.809, "pct_cuda_time": 1.4367213416742537, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.43, "cuda_time_us": 155.518, "pct_cuda_time": 0.19568599443032644, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 155.518, "pct_cuda_time": 0.19568599443032644, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.556, "cuda_time_us": 572.504, "pct_cuda_time": 0.7203732979805527, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 571.768, "pct_cuda_time": 0.7194472000889858, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2411.194, "cuda_time_us": 2496.417, "pct_cuda_time": 3.1412045111033593, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.999, "cuda_time_us": 37.632, "pct_cuda_time": 0.04735178784707907, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.632, "pct_cuda_time": 0.04735178784707907, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1703.364, "cuda_time_us": 552.0889999999999, "pct_cuda_time": 0.6946854060561767, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 154.934, "cuda_time_us": 253.628, "pct_cuda_time": 0.31913635331842505, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 252.892, "pct_cuda_time": 0.31821025542685805, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 493.834, "cuda_time_us": 48.159, "pct_cuda_time": 0.06059775592388078, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 48.159, "pct_cuda_time": 0.06059775592388078, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 706.916, "cuda_time_us": 77.24799999999999, "pct_cuda_time": 0.09720001348881709, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 19.871, "pct_cuda_time": 0.025003384787130857, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], 
bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 56.032, "pct_cuda_time": 0.07050423513625464, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.345, "pct_cuda_time": 0.0016923935654315839, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 201.898, "cuda_time_us": 173.054, "pct_cuda_time": 0.21775128332505375, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 172.318, "pct_cuda_time": 0.21682518543348675, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.117, "cuda_time_us": 36.448, "pct_cuda_time": 0.04586197819542778, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.448, "pct_cuda_time": 0.04586197819542778, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 463.824, "cuda_time_us": 1870.248, "pct_cuda_time": 2.3533053390046756, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.52, "cuda_time_us": 1140.881, "pct_cuda_time": 1.435553653028365, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1140.145, "pct_cuda_time": 1.434627555136798, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.652, "cuda_time_us": 155.326, "pct_cuda_time": 0.1954444036760046, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 155.326, "pct_cuda_time": 0.1954444036760046, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 148.123, "cuda_time_us": 574.0409999999999, "pct_cuda_time": 0.7223072823003062, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 573.305, "pct_cuda_time": 0.7213811844087391, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2525.089, "cuda_time_us": 2496.511, "pct_cuda_time": 3.141322789910163, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.518, "cuda_time_us": 37.6, "pct_cuda_time": 0.04731152272135877, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 37.6, "pct_cuda_time": 0.04731152272135877, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], 
bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1800.674, "cuda_time_us": 553.367, "pct_cuda_time": 0.6962934945146314, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 150.621, "cuda_time_us": 253.053, "pct_cuda_time": 0.31841283934063835, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 252.317, "pct_cuda_time": 0.31748674144907135, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[3584, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 544.333, "cuda_time_us": 48.639, "pct_cuda_time": 0.061201732809685355, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 48.639, "pct_cuda_time": 0.061201732809685355, "trace": "_C::rotary_embedding(int64[3584], bfloat16[3584, 4096], bfloat16[3584, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 755.704, "cuda_time_us": 77.598, "pct_cuda_time": 0.09764041330138293, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 20.159, "pct_cuda_time": 0.025365770918613605, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[3584], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 55.904, 
"pct_cuda_time": 0.07034317463337343, "trace": "_vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.535, "pct_cuda_time": 0.0019314677493958965, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], None, None, bfloat16[3584, 32, 128], int32[15], int32[15], None, None, None, 256, 256, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[3584, 32, 128], bfloat16[3584, 8, 128], bfloat16[3584, 8, 128], bfloat16[3584, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 201.066, "cuda_time_us": 174.077, "pct_cuda_time": 0.21903850906292477, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 173.341, "pct_cuda_time": 0.21811241117135777, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[3584, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 98.39, "cuda_time_us": 36.256, "pct_cuda_time": 0.045620387441105956, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 36.256, "pct_cuda_time": 0.045620387441105956, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 478.639, "cuda_time_us": 1869.288, "pct_cuda_time": 2.3520973852330664, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 179.331, "cuda_time_us": 1139.633, "pct_cuda_time": 1.433983313125273, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 1138.865, "pct_cuda_time": 1.4330169501079857, "trace": "mm(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[3584, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[3584, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.202, "cuda_time_us": 155.006, "pct_cuda_time": 0.19504175241880156, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 155.006, "pct_cuda_time": 0.19504175241880156, "trace": "_C::silu_and_mul(bfloat16[3584, 14336], bfloat16[3584, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 147.812, "cuda_time_us": 574.649, "pct_cuda_time": 0.723072319688992, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0009663630172873283, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 573.881, "pct_cuda_time": 0.7221059566717046, "trace": "mm(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[3584, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[3584, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.881, "cuda_time_us": 38.176, "pct_cuda_time": 0.048036294984324275, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 38.176, "pct_cuda_time": 0.048036294984324275, "trace": "_C::fused_add_rms_norm(bfloat16[3584, 4096], bfloat16[3584, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 471.15, "cuda_time_us": 365.915, "pct_cuda_time": 0.46042542118579777, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 10.016, "pct_cuda_time": 0.012602984350455572, "trace": "index_select(bfloat16[3584, 4096], 0, int64[14])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0009260978915670229, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[14, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 355.163, "pct_cuda_time": 0.44689633894377523, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 128256]) <- 
linear(bfloat16[14, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 3971.949, "cuda_time_us": 149.407, "pct_cuda_time": 0.1879966137029269, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.003986247446310229, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.432, "pct_cuda_time": 0.0030601495547432057, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.0031809449319041222, "trace": "copy_(int32[14], int32[14], True) <- _to_copy(int32[14], 3, 0, None, None, True, None) <- to(int32[14], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.0031406798061838167, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.0032212100576244278, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.463, "pct_cuda_time": 0.0030991563952847516, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 2.465, "pct_cuda_time": 0.0031016729656422708, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 7.328, "pct_cuda_time": 0.009220713789949923, "trace": "copy_(float32[14, 128256], bfloat16[14, 128256], False) <- _to_copy(bfloat16[14, 128256], 6, None, None, None, False, None) <- to(bfloat16[14, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > 
>(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 10.368, "pct_cuda_time": 0.013045900733378931, "trace": "div_(float32[14, 128256], bfloat16[14, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 37.248, "pct_cuda_time": 0.04686860633843541, "trace": "_softmax(float32[14, 128256], -1, False) <- softmax(float32[14, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 30.048, "pct_cuda_time": 0.037808953051366716, "trace": "_log_softmax(float32[14, 128256], -1, False) <- log_softmax(float32[14, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 1.888, "pct_cuda_time": 0.0023756424174980148, "trace": "copy_(int64[14], int32[14], False) <- _to_copy(int32[14], 4, None, None, None, False, None) <- to(int32[14], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 10.784, "pct_cuda_time": 0.013569347367742901, "trace": "index(float32[14, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 30.079, "pct_cuda_time": 0.03784795989190826, "trace": "argmax(float32[14, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 3.552, "pct_cuda_time": 0.004469428954953893, "trace": "copy_(int64[14], int64[14], False) <- _to_copy(int64[14], 4, 0, None, None, False, None) <- to(int64[14], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] }, "decode_1": { "metadata": { "num_running_seqs": 14 }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", "cuda_time_us": 6455.811, "pct_cuda_time": 93.10397491772402, "invocations": 1 }, "children": [ 
{ "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 11.2, "pct_cuda_time": 0.16152339637553037, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 11.2, "pct_cuda_time": 0.16152339637553037, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 6441.4749999999985, "pct_cuda_time": 92.89722497036333, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 203.13100000000009, "pct_cuda_time": 2.92950080617481, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 4.511, "pct_cuda_time": 0.0650564322366087, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 198.6200000000001, "pct_cuda_time": 2.8644443739382015, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 1925.8909999999994, "pct_cuda_time": 27.774683515095223, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 702.101, "pct_cuda_time": 10.125512332022879, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 702.101, "pct_cuda_time": 10.125512332022879, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 121.31000000000004, "pct_cuda_time": 1.7495002869924638, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 121.31000000000004, "pct_cuda_time": 1.7495002869924638, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 578.613, "pct_cuda_time": 8.344601513128104, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 81.11700000000002, "pct_cuda_time": 1.1698476199815984, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, 
flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 452.4080000000002, "pct_cuda_time": 6.524506848880445, "invocations": 32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 45.08800000000001, "pct_cuda_time": 0.6502470442660638, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 523.8670000000001, "pct_cuda_time": 7.5550693829517845, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 523.8670000000001, "pct_cuda_time": 7.5550693829517845, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 4312.453, "pct_cuda_time": 62.19304064909331, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 2611.0359999999996, "pct_cuda_time": 37.655660962391, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 2611.0359999999996, "pct_cuda_time": 37.655660962391, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 290.4629999999999, "pct_cuda_time": 4.188979489413006, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 290.4629999999999, "pct_cuda_time": 4.188979489413006, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 1410.954, "pct_cuda_time": 20.348400197289294, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 1410.954, "pct_cuda_time": 20.348400197289294, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 350.491, "pct_cuda_time": 5.054687207058572, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 8.032, "pct_cuda_time": 0.11583534997216607, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 0.736, "pct_cuda_time": 0.010614394618963425, "invocations": 1 }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 341.723, "pct_cuda_time": 4.928237462467443, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 127.678, "pct_cuda_time": 1.8413378752174077, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 5.374999999999999, "pct_cuda_time": 0.07751680852843532, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 6.976, "pct_cuda_time": 0.10060600117104462, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 9.728, "pct_cuda_time": 0.14029460713760353, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 34.943, "pct_cuda_time": 0.5039385749598355, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 28.223, "pct_cuda_time": 0.40702453713451725, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 1.696, "pct_cuda_time": 0.024459257165437457, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} 
const&)::{lambda(int)#1})", "cuda_time_us": 10.176, "pct_cuda_time": 0.14675554299262472, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 27.936, "pct_cuda_time": 0.4028855001023944, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 2.625, "pct_cuda_time": 0.03785704602551493, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 80061.032, "cuda_time_us": 6455.811, "pct_cuda_time": 93.10397491772402, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 273.216, "cuda_time_us": 11.2, "pct_cuda_time": 0.16152339637553037, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 11.2, "pct_cuda_time": 0.16152339637553037, "trace": "index_select(bfloat16[128256, 4096], 0, int64[14]) <- embedding(bfloat16[128256, 4096], int64[14], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 4042.301, "cuda_time_us": 210.141, "pct_cuda_time": 3.0305971462277075, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 267.716, "cuda_time_us": 4.511, "pct_cuda_time": 0.0650564322366087, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.511, "pct_cuda_time": 0.0650564322366087, "trace": "_C::rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2903.653, "cuda_time_us": 67.55199999999999, "pct_cuda_time": 0.974216827853556, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 477.499, "cuda_time_us": 27.552, "pct_cuda_time": 0.3973475550838047, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 27.552, "pct_cuda_time": 0.3973475550838047, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 887.343, "cuda_time_us": 3.776, "pct_cuda_time": 0.054456459349464525, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.054456459349464525, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 999.566, "cuda_time_us": 19.776, "pct_cuda_time": 0.285204168457365, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, 
(vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.03553514720261668, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.84, "pct_cuda_time": 0.22844023201682154, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02122878923792685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 280.364, "cuda_time_us": 16.448, "pct_cuda_time": 0.23720864496292174, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.448, "pct_cuda_time": 0.23720864496292174, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": 
"RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 126.856, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 608.327, "cuda_time_us": 134.91, "pct_cuda_time": 1.9456358397341789, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 223.492, "cuda_time_us": 81.919, "pct_cuda_time": 1.1814138489006314, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.919, "pct_cuda_time": 1.1814138489006314, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 134.342, "cuda_time_us": 9.152, "pct_cuda_time": 0.1319876896097191, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.152, "pct_cuda_time": 0.1319876896097191, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 166.815, "cuda_time_us": 43.839, "pct_cuda_time": 0.6322343012238282, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.839, "pct_cuda_time": 0.6322343012238282, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2498.588, "cuda_time_us": 201.308, "pct_cuda_time": 2.903209989068327, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.605, "cuda_time_us": 3.103, "pct_cuda_time": 0.04475063383511346, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.103, "pct_cuda_time": 0.04475063383511346, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1774.951, "cuda_time_us": 59.48800000000001, "pct_cuda_time": 0.8579199824631742, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 145.752, "cuda_time_us": 21.312, "pct_cuda_time": 0.30735594853172354, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.312, "pct_cuda_time": 0.30735594853172354, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 538.872, "cuda_time_us": 3.968, "pct_cuda_time": 0.05722543185875933, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.968, "pct_cuda_time": 0.05722543185875933, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 738.648, "cuda_time_us": 18.016000000000002, "pct_cuda_time": 0.259821920455496, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.465, "pct_cuda_time": 0.03554956893443592, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.271, "pct_cuda_time": 0.20581253479242806, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01845981672863204, 
"trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 189.682, "cuda_time_us": 16.192, "pct_cuda_time": 0.23351668161719535, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.192, "pct_cuda_time": 0.23351668161719535, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.124, "cuda_time_us": 3.231, "pct_cuda_time": 0.04659661550797666, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.231, "pct_cuda_time": 0.04659661550797666, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 465.662, "cuda_time_us": 135.486, "pct_cuda_time": 1.953942757262063, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 161.112, "cuda_time_us": 81.631, "pct_cuda_time": 1.1772603901366891, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.631, "pct_cuda_time": 1.1772603901366891, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.625, "cuda_time_us": 9.151, "pct_cuda_time": 0.13197326787789987, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.151, "pct_cuda_time": 0.13197326787789987, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.678, "cuda_time_us": 44.704, "pct_cuda_time": 0.6447090992474741, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.704, "pct_cuda_time": 0.6447090992474741, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": 
"LlamaDecoderLayer", "cpu_time_us": 2447.671, "cuda_time_us": 199.77100000000002, "pct_cuda_time": 2.88104378726215, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.727, "cuda_time_us": 3.103, "pct_cuda_time": 0.04475063383511346, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.103, "pct_cuda_time": 0.04475063383511346, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1770.714, "cuda_time_us": 59.391000000000005, "pct_cuda_time": 0.8565210744767077, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.351, "cuda_time_us": 21.408, "pct_cuda_time": 0.3087404347863709, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.408, "pct_cuda_time": 0.3087404347863709, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 530.03, "cuda_time_us": 3.936, "pct_cuda_time": 0.056763936440543526, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.936, "pct_cuda_time": 0.056763936440543526, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 761.541, "cuda_time_us": 17.887, "pct_cuda_time": 0.25796151705081355, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.036458138039048286, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, 
false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.015, "pct_cuda_time": 0.20212057144670162, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.019382807565063644, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 185.446, "cuda_time_us": 16.16, "pct_cuda_time": 0.23305518619897952, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.16, "pct_cuda_time": 0.23305518619897952, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.605, "cuda_time_us": 3.071, "pct_cuda_time": 0.04428913841689766, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.071, "pct_cuda_time": 0.04428913841689766, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 435.474, "cuda_time_us": 134.20600000000002, "pct_cuda_time": 1.9354829405334315, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 144.84, "cuda_time_us": 81.215, "pct_cuda_time": 1.1712609496998838, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.215, "pct_cuda_time": 1.1712609496998838, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, 
{ "entry": { "name": "SiluAndMul", "cpu_time_us": 95.366, "cuda_time_us": 8.864, "pct_cuda_time": 0.1278342308457769, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.864, "pct_cuda_time": 0.1278342308457769, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.271, "cuda_time_us": 44.127, "pct_cuda_time": 0.6363877599877704, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.127, "pct_cuda_time": 0.6363877599877704, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2306.303, "cuda_time_us": 202.94, "pct_cuda_time": 2.926746255397333, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.809, "cuda_time_us": 3.232, "pct_cuda_time": 0.046611037239795906, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.046611037239795906, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1655.314, "cuda_time_us": 60.22200000000001, "pct_cuda_time": 0.8685055336184991, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.236, "cuda_time_us": 21.823, "pct_cuda_time": 0.3147254534913571, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.823, "pct_cuda_time": 0.3147254534913571, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 496.987, "cuda_time_us": 3.68, "pct_cuda_time": 0.05307197309481713, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05307197309481713, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 699.364, "cuda_time_us": 18.111, "pct_cuda_time": 0.26119198497832413, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.624, 
"pct_cuda_time": 0.037842624293695684, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.015, "pct_cuda_time": 0.20212057144670162, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02122878923792685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 175.275, "cuda_time_us": 16.608, "pct_cuda_time": 0.23951612205400075, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.608, "pct_cuda_time": 0.23951612205400075, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.502, "cuda_time_us": 3.105, "pct_cuda_time": 0.044779477298751945, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, 
void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.105, "pct_cuda_time": 0.044779477298751945, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 429.488, "cuda_time_us": 136.381, "pct_cuda_time": 1.9668502072402863, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 142.587, "cuda_time_us": 82.942, "pct_cuda_time": 1.1961672805517176, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.942, "pct_cuda_time": 1.1961672805517176, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 92.865, "cuda_time_us": 9.12, "pct_cuda_time": 0.13152619419150327, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.12, "pct_cuda_time": 0.13152619419150327, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.332, "cuda_time_us": 44.319, "pct_cuda_time": 0.6391567324970653, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.319, "pct_cuda_time": 0.6391567324970653, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2362.519, "cuda_time_us": 200.667, "pct_cuda_time": 2.893965658972192, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.87, "cuda_time_us": 3.2, "pct_cuda_time": 0.04614954182158011, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04614954182158011, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1673.401, "cuda_time_us": 59.55, "pct_cuda_time": 0.8588141298359673, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 142.392, "cuda_time_us": 21.727, "pct_cuda_time": 0.3133409672367097, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.727, "pct_cuda_time": 0.3133409672367097, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], 
bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 474.866, "cuda_time_us": 3.68, "pct_cuda_time": 0.05307197309481713, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05307197309481713, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 696.152, "cuda_time_us": 18.015, "pct_cuda_time": 0.25980749872367676, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.03553514720261668, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.111, "pct_cuda_time": 0.20350505770134905, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.020767293819711048, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, 
True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 185.498, "cuda_time_us": 16.128, "pct_cuda_time": 0.23259369078076375, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.128, "pct_cuda_time": 0.23259369078076375, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.197, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 448.108, "cuda_time_us": 134.781, "pct_cuda_time": 1.9437754363294966, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 148.492, "cuda_time_us": 81.631, "pct_cuda_time": 1.1772603901366891, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.631, "pct_cuda_time": 1.1772603901366891, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.885, "cuda_time_us": 8.959, "pct_cuda_time": 0.12920429536860506, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.959, "pct_cuda_time": 0.12920429536860506, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.81, "cuda_time_us": 44.191, "pct_cuda_time": 0.6373107508242021, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.191, "pct_cuda_time": 0.6373107508242021, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2653.794, "cuda_time_us": 201.30900000000003, "pct_cuda_time": 2.903224410800147, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.488, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1948.746, "cuda_time_us": 60.44800000000001, "pct_cuda_time": 0.8717648450096482, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.86, "cuda_time_us": 21.792, "pct_cuda_time": 0.31427837980496054, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.792, "pct_cuda_time": 0.31427837980496054, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 516.286, "cuda_time_us": 3.808, "pct_cuda_time": 0.05491795476768032, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05491795476768032, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 932.684, "cuda_time_us": 18.048000000000002, "pct_cuda_time": 0.2602834158737118, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.036458138039048286, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.24, 
"pct_cuda_time": 0.20536546110603146, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01845981672863204, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 207.639, "cuda_time_us": 16.8, "pct_cuda_time": 0.24228509456329556, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.8, "pct_cuda_time": 0.24228509456329556, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.133, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 444.715, "cuda_time_us": 134.621, "pct_cuda_time": 1.9414679592384174, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 145.145, "cuda_time_us": 81.63, "pct_cuda_time": 1.17724596840487, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.63, "pct_cuda_time": 1.17724596840487, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.661, "cuda_time_us": 8.992, "pct_cuda_time": 0.12968021251864012, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 
0.12968021251864012, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 140.312, "cuda_time_us": 43.999, "pct_cuda_time": 0.6345417783149072, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.999, "pct_cuda_time": 0.6345417783149072, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2298.199, "cuda_time_us": 200.925, "pct_cuda_time": 2.897686465781557, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.807, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1655.804, "cuda_time_us": 59.934, "pct_cuda_time": 0.8643520748545569, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.935, "cuda_time_us": 21.503, "pct_cuda_time": 0.31011049930919904, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.503, "pct_cuda_time": 0.31011049930919904, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 497.915, "cuda_time_us": 3.776, "pct_cuda_time": 0.054456459349464525, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.054456459349464525, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 692.336, "cuda_time_us": 17.696, "pct_cuda_time": 0.255206966273338, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.036458138039048286, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 
128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.856, "pct_cuda_time": 0.19982751608744187, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018921312146847842, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 172.06, "cuda_time_us": 16.959, "pct_cuda_time": 0.2445781499225553, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.959, "pct_cuda_time": 0.2445781499225553, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.926, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", 
"cpu_time_us": 426.383, "cuda_time_us": 134.655, "pct_cuda_time": 1.9419582981202717, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 142.54, "cuda_time_us": 82.047, "pct_cuda_time": 1.1832598305734945, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.047, "pct_cuda_time": 1.1832598305734945, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.137, "cuda_time_us": 9.024, "pct_cuda_time": 0.1301417079368559, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.024, "pct_cuda_time": 0.1301417079368559, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.136, "cuda_time_us": 43.584, "pct_cuda_time": 0.6285567596099211, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.584, "pct_cuda_time": 0.6285567596099211, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2366.721, "cuda_time_us": 200.637, "pct_cuda_time": 2.8935330070176146, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.922, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1679.378, "cuda_time_us": 59.711, "pct_cuda_time": 0.8611360286588656, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.48, "cuda_time_us": 21.76, "pct_cuda_time": 0.31381688438674477, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.76, "pct_cuda_time": 0.31381688438674477, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 494.478, "cuda_time_us": 3.776, "pct_cuda_time": 0.054456459349464525, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, 
int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.054456459349464525, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 702.293, "cuda_time_us": 17.791, "pct_cuda_time": 0.2565770307961662, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.495, "pct_cuda_time": 0.03598222088901324, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.824, "pct_cuda_time": 0.19936602066922604, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02122878923792685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 195.051, "cuda_time_us": 16.384, "pct_cuda_time": 
0.23628565412649014, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.384, "pct_cuda_time": 0.23628565412649014, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.945, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 459.408, "cuda_time_us": 134.654, "pct_cuda_time": 1.9419438763884522, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 154.618, "cuda_time_us": 81.247, "pct_cuda_time": 1.1717224451180996, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.247, "pct_cuda_time": 1.1717224451180996, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.077, "cuda_time_us": 8.928, "pct_cuda_time": 0.12875722168220852, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.12875722168220852, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.44, "cuda_time_us": 44.479, "pct_cuda_time": 0.6414642095881442, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.479, "pct_cuda_time": 0.6414642095881442, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2315.515, "cuda_time_us": 201.086, "pct_cuda_time": 2.900008364604455, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.005, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, 
"children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1637.49, "cuda_time_us": 59.742999999999995, "pct_cuda_time": 0.8615975240770812, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.456, "cuda_time_us": 21.375, "pct_cuda_time": 0.30826451763633583, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.375, "pct_cuda_time": 0.30826451763633583, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 502.174, "cuda_time_us": 3.712, "pct_cuda_time": 0.05353346851303292, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05353346851303292, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 674.133, "cuda_time_us": 18.496000000000002, "pct_cuda_time": 0.26674435172873306, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03691963345726408, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.24, "pct_cuda_time": 0.20536546110603146, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.696, "pct_cuda_time": 0.024459257165437457, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 172.643, "cuda_time_us": 16.16, "pct_cuda_time": 0.23305518619897952, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.16, "pct_cuda_time": 0.23305518619897952, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.438, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 445.984, "cuda_time_us": 135.071, "pct_cuda_time": 1.9479577385570772, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 147.2, "cuda_time_us": 81.343, "pct_cuda_time": 1.1731069313727471, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.343, "pct_cuda_time": 1.1731069313727471, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.389, "cuda_time_us": 9.12, "pct_cuda_time": 0.13152619419150327, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.12, "pct_cuda_time": 0.13152619419150327, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 135.521, "cuda_time_us": 44.608, "pct_cuda_time": 0.6433246129928266, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.608, "pct_cuda_time": 0.6433246129928266, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2454.045, "cuda_time_us": 201.69299999999998, "pct_cuda_time": 2.908762355818736, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.162, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1757.497, "cuda_time_us": 60.254999999999995, "pct_cuda_time": 0.8689814507685341, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 215.501, "cuda_time_us": 22.271, "pct_cuda_time": 0.32118638934637833, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.271, "pct_cuda_time": 0.32118638934637833, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 490.886, "cuda_time_us": 3.776, "pct_cuda_time": 0.054456459349464525, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.054456459349464525, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 704.126, "cuda_time_us": 17.92, "pct_cuda_time": 0.25843743420084864, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.432, "pct_cuda_time": 0.03507365178440088, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, 
cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.208, "pct_cuda_time": 0.2049039656878157, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01845981672863204, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 195.446, "cuda_time_us": 16.288, "pct_cuda_time": 0.23490116787184276, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.288, "pct_cuda_time": 0.23490116787184276, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.097, "cuda_time_us": 3.263, "pct_cuda_time": 0.04705811092619246, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.263, "pct_cuda_time": 0.04705811092619246, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 462.251, "cuda_time_us": 135.007, "pct_cuda_time": 1.9470347477206453, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.134, "cuda_time_us": 80.927, "pct_cuda_time": 1.1671074909359418, "trace": "" }, "children": [ { 
"entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.927, "pct_cuda_time": 1.1671074909359418, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.479, "cuda_time_us": 9.248, "pct_cuda_time": 0.1333721758643665, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.248, "pct_cuda_time": 0.1333721758643665, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.116, "cuda_time_us": 44.832, "pct_cuda_time": 0.6465550809203373, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.832, "pct_cuda_time": 0.6465550809203373, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2350.75, "cuda_time_us": 200.255, "pct_cuda_time": 2.888023905462664, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.212, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1686.356, "cuda_time_us": 59.072, "pct_cuda_time": 0.8519205420263689, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.828, "cuda_time_us": 21.696, "pct_cuda_time": 0.31289389355031316, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.696, "pct_cuda_time": 0.31289389355031316, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 486.11, "cuda_time_us": 3.745, "pct_cuda_time": 0.05400938566306796, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.745, "pct_cuda_time": 0.05400938566306796, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 719.6, "cuda_time_us": 
17.663, "pct_cuda_time": 0.25473104912330297, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03691963345726408, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.791, "pct_cuda_time": 0.198890103519191, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018921312146847842, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 180.985, "cuda_time_us": 15.968, "pct_cuda_time": 0.23028621368968472, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.968, "pct_cuda_time": 0.23028621368968472, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- 
matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.403, "cuda_time_us": 3.105, "pct_cuda_time": 0.044779477298751945, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.105, "pct_cuda_time": 0.044779477298751945, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 432.0, "cuda_time_us": 134.974, "pct_cuda_time": 1.94655883057061, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 142.174, "cuda_time_us": 80.991, "pct_cuda_time": 1.1680304817723732, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.991, "pct_cuda_time": 1.1680304817723732, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.581, "cuda_time_us": 9.216, "pct_cuda_time": 0.1329106804461507, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.216, "pct_cuda_time": 0.1329106804461507, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.157, "cuda_time_us": 44.767, "pct_cuda_time": 0.6456176683520864, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.767, "pct_cuda_time": 0.6456176683520864, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2420.726, "cuda_time_us": 201.21299999999997, "pct_cuda_time": 2.9018399245454987, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.734, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1748.085, "cuda_time_us": 60.510999999999996, "pct_cuda_time": 0.8726734141142605, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.212, "cuda_time_us": 22.24, "pct_cuda_time": 
0.3207393156599817, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.24, "pct_cuda_time": 0.3207393156599817, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 462.775, "cuda_time_us": 3.871, "pct_cuda_time": 0.05582652387229268, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.871, "pct_cuda_time": 0.05582652387229268, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 709.812, "cuda_time_us": 18.112000000000002, "pct_cuda_time": 0.2612064067101434, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.036458138039048286, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.952, "pct_cuda_time": 0.20121200234208925, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", 
"cpu_time_us": 0, "cuda_time_us": 1.632, "pct_cuda_time": 0.02353626632900585, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 195.622, "cuda_time_us": 16.288, "pct_cuda_time": 0.23490116787184276, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.288, "pct_cuda_time": 0.23490116787184276, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.202, "cuda_time_us": 3.103, "pct_cuda_time": 0.04475063383511346, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.103, "pct_cuda_time": 0.04475063383511346, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 439.306, "cuda_time_us": 134.43099999999998, "pct_cuda_time": 1.9387278301927608, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 145.01, "cuda_time_us": 81.759, "pct_cuda_time": 1.1791063718095525, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.759, "pct_cuda_time": 1.1791063718095525, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.659, "cuda_time_us": 8.961, "pct_cuda_time": 0.12923313883224355, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.961, "pct_cuda_time": 0.12923313883224355, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 140.699, "cuda_time_us": 43.711, "pct_cuda_time": 0.630388319550965, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.711, "pct_cuda_time": 0.630388319550965, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 
14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2572.462, "cuda_time_us": 201.341, "pct_cuda_time": 2.903685906218363, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.918, "cuda_time_us": 3.296, "pct_cuda_time": 0.04753402807622751, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.04753402807622751, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1895.383, "cuda_time_us": 59.774, "pct_cuda_time": 0.862044597763478, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.522, "cuda_time_us": 21.791, "pct_cuda_time": 0.3142639580731413, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.791, "pct_cuda_time": 0.3142639580731413, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 532.273, "cuda_time_us": 3.648, "pct_cuda_time": 0.052610477676601326, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.052610477676601326, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 873.367, "cuda_time_us": 18.047, "pct_cuda_time": 0.26026899414189253, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.431, "pct_cuda_time": 0.03505923005258164, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, 
cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.144, "pct_cuda_time": 0.20398097485138408, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02122878923792685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 198.185, "cuda_time_us": 16.288, "pct_cuda_time": 0.23490116787184276, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.288, "pct_cuda_time": 0.23490116787184276, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.622, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 447.517, "cuda_time_us": 135.103, "pct_cuda_time": 1.9484192339752928, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 151.907, "cuda_time_us": 82.175, "pct_cuda_time": 1.1851058122463578, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.175, "pct_cuda_time": 1.1851058122463578, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- 
linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.682, "cuda_time_us": 9.44, "pct_cuda_time": 0.1361411483736613, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.44, "pct_cuda_time": 0.1361411483736613, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.596, "cuda_time_us": 43.488, "pct_cuda_time": 0.6271722733552736, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.488, "pct_cuda_time": 0.6271722733552736, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2298.171, "cuda_time_us": 200.50799999999998, "pct_cuda_time": 2.8916726036129323, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.354, "cuda_time_us": 3.105, "pct_cuda_time": 0.044779477298751945, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.105, "pct_cuda_time": 0.044779477298751945, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1628.379, "cuda_time_us": 60.222, "pct_cuda_time": 0.8685055336184991, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.596, "cuda_time_us": 21.696, "pct_cuda_time": 0.31289389355031316, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.696, "pct_cuda_time": 0.31289389355031316, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 488.188, "cuda_time_us": 3.936, "pct_cuda_time": 0.056763936440543526, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.936, "pct_cuda_time": 0.056763936440543526, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 685.653, "cuda_time_us": 18.078, "pct_cuda_time": 0.2607160678282891, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, 
int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.463, "pct_cuda_time": 0.03552072547079744, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.304, "pct_cuda_time": 0.20628845194246306, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.311, "pct_cuda_time": 0.0189068904150286, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 171.551, "cuda_time_us": 16.512, "pct_cuda_time": 0.23813163579935334, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.512, "pct_cuda_time": 0.23813163579935334, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.948, "cuda_time_us": 3.103, "pct_cuda_time": 0.04475063383511346, "trace": "" }, "children": [ { 
"entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.103, "pct_cuda_time": 0.04475063383511346, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 448.38, "cuda_time_us": 134.078, "pct_cuda_time": 1.933636958860568, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.729, "cuda_time_us": 81.407, "pct_cuda_time": 1.1740299222091786, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.407, "pct_cuda_time": 1.1740299222091786, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.527, "cuda_time_us": 8.896, "pct_cuda_time": 0.12829572626399272, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.896, "pct_cuda_time": 0.12829572626399272, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.282, "cuda_time_us": 43.775, "pct_cuda_time": 0.6313113103873965, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.775, "pct_cuda_time": 0.6313113103873965, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2333.564, "cuda_time_us": 199.58300000000003, "pct_cuda_time": 2.8783325016801324, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.359, "cuda_time_us": 3.072, "pct_cuda_time": 0.044303560148716906, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044303560148716906, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1660.684, "cuda_time_us": 59.52, "pct_cuda_time": 0.8583814778813901, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 177.59, "cuda_time_us": 21.856, "pct_cuda_time": 0.31520137064139214, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.856, "pct_cuda_time": 0.31520137064139214, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 
6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 471.444, "cuda_time_us": 3.936, "pct_cuda_time": 0.056763936440543526, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.936, "pct_cuda_time": 0.056763936440543526, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 681.435, "cuda_time_us": 17.6, "pct_cuda_time": 0.25382248001869057, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.036458138039048286, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.792, "pct_cuda_time": 0.19890452525101027, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01845981672863204, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], 
None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 185.699, "cuda_time_us": 16.128, "pct_cuda_time": 0.23259369078076375, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.128, "pct_cuda_time": 0.23259369078076375, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.568, "cuda_time_us": 3.232, "pct_cuda_time": 0.046611037239795906, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.046611037239795906, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 445.201, "cuda_time_us": 133.75900000000001, "pct_cuda_time": 1.9290364264102293, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 148.752, "cuda_time_us": 81.311, "pct_cuda_time": 1.1726454359545313, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.311, "pct_cuda_time": 1.1726454359545313, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.209, "cuda_time_us": 8.961, "pct_cuda_time": 0.12923313883224355, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.961, "pct_cuda_time": 0.12923313883224355, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.372, "cuda_time_us": 43.487, "pct_cuda_time": 0.6271578516234545, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.487, "pct_cuda_time": 0.6271578516234545, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2338.234, "cuda_time_us": 200.317, "pct_cuda_time": 2.8889180528354568, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.154, "cuda_time_us": 3.391, 
"pct_cuda_time": 0.04890409259905566, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.391, "pct_cuda_time": 0.04890409259905566, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1663.63, "cuda_time_us": 59.52, "pct_cuda_time": 0.8583814778813901, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.12, "cuda_time_us": 21.504, "pct_cuda_time": 0.3101249210410183, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.504, "pct_cuda_time": 0.3101249210410183, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 511.677, "cuda_time_us": 3.808, "pct_cuda_time": 0.05491795476768032, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05491795476768032, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 690.986, "cuda_time_us": 17.824, "pct_cuda_time": 0.2570529479462012, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.035996642620832485, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > 
>::Params)", "cpu_time_us": 0, "cuda_time_us": 13.856, "pct_cuda_time": 0.19982751608744187, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02122878923792685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 177.285, "cuda_time_us": 16.384, "pct_cuda_time": 0.23628565412649014, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.384, "pct_cuda_time": 0.23628565412649014, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.929, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 444.122, "cuda_time_us": 134.238, "pct_cuda_time": 1.935944435951647, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 150.252, "cuda_time_us": 81.695, "pct_cuda_time": 1.1781833809731208, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.695, "pct_cuda_time": 1.1781833809731208, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.268, "cuda_time_us": 9.056, "pct_cuda_time": 0.1306032033550717, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", 
"cpu_time_us": 0, "cuda_time_us": 9.056, "pct_cuda_time": 0.1306032033550717, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.395, "cuda_time_us": 43.487, "pct_cuda_time": 0.6271578516234545, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.487, "pct_cuda_time": 0.6271578516234545, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2261.654, "cuda_time_us": 201.82, "pct_cuda_time": 2.91059391575978, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.182, "cuda_time_us": 3.263, "pct_cuda_time": 0.04705811092619246, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.263, "pct_cuda_time": 0.04705811092619246, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1610.815, "cuda_time_us": 59.519000000000005, "pct_cuda_time": 0.8583670561495708, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.223, "cuda_time_us": 21.44, "pct_cuda_time": 0.3092019302045867, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.44, "pct_cuda_time": 0.3092019302045867, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 483.272, "cuda_time_us": 3.744, "pct_cuda_time": 0.053994963931248724, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.053994963931248724, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 676.083, "cuda_time_us": 18.175, "pct_cuda_time": 0.26211497581475574, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.036458138039048286, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 
128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.047, "pct_cuda_time": 0.20258206686491745, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.6, "pct_cuda_time": 0.023074770910790056, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 172.447, "cuda_time_us": 16.16, "pct_cuda_time": 0.23305518619897952, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.16, "pct_cuda_time": 0.23305518619897952, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.593, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" 
}, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 426.268, "cuda_time_us": 135.934, "pct_cuda_time": 1.9604036931170843, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 143.273, "cuda_time_us": 82.462, "pct_cuda_time": 1.1892448492784808, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.462, "pct_cuda_time": 1.1892448492784808, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.365, "cuda_time_us": 9.056, "pct_cuda_time": 0.1306032033550717, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.056, "pct_cuda_time": 0.1306032033550717, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 134.051, "cuda_time_us": 44.416, "pct_cuda_time": 0.6405556404835318, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.416, "pct_cuda_time": 0.6405556404835318, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2370.697, "cuda_time_us": 201.788, "pct_cuda_time": 2.9101324203415646, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.758, "cuda_time_us": 3.137, "pct_cuda_time": 0.045240972716967746, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.137, "pct_cuda_time": 0.045240972716967746, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1690.314, "cuda_time_us": 59.998000000000005, "pct_cuda_time": 0.8652750656909884, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 131.237, "cuda_time_us": 21.568, "pct_cuda_time": 0.3110479118774499, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.568, "pct_cuda_time": 0.3110479118774499, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 513.642, "cuda_time_us": 3.808, "pct_cuda_time": 0.05491795476768032, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long 
const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05491795476768032, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 697.616, "cuda_time_us": 18.399, "pct_cuda_time": 0.2653454437422664, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.815, "pct_cuda_time": 0.04059717507117125, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.208, "pct_cuda_time": 0.2049039656878157, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.376, "pct_cuda_time": 0.019844302983279445, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", 
"cpu_time_us": 198.892, "cuda_time_us": 16.223, "pct_cuda_time": 0.23396375530359187, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.223, "pct_cuda_time": 0.23396375530359187, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.132, "cuda_time_us": 3.135, "pct_cuda_time": 0.04521212925332926, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.135, "pct_cuda_time": 0.04521212925332926, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 446.18, "cuda_time_us": 135.518, "pct_cuda_time": 1.954404252680279, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 146.256, "cuda_time_us": 81.695, "pct_cuda_time": 1.1781833809731208, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.695, "pct_cuda_time": 1.1781833809731208, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.721, "cuda_time_us": 9.184, "pct_cuda_time": 0.1324491850279349, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.184, "pct_cuda_time": 0.1324491850279349, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.912, "cuda_time_us": 44.639, "pct_cuda_time": 0.6437716866792234, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.639, "pct_cuda_time": 0.6437716866792234, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2370.206, "cuda_time_us": 201.37400000000002, "pct_cuda_time": 2.904161823368398, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.718, "cuda_time_us": 3.072, "pct_cuda_time": 0.044303560148716906, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044303560148716906, "trace": 
"_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1648.87, "cuda_time_us": 60.12700000000001, "pct_cuda_time": 0.8671354690956711, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.568, "cuda_time_us": 21.535, "pct_cuda_time": 0.31057199472741487, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.535, "pct_cuda_time": 0.31057199472741487, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 470.727, "cuda_time_us": 3.744, "pct_cuda_time": 0.053994963931248724, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.053994963931248724, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 687.802, "cuda_time_us": 17.984, "pct_cuda_time": 0.25936042503728024, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.592, "pct_cuda_time": 0.03738112887547989, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.112, "pct_cuda_time": 0.20351947943316825, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, 
None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01845981672863204, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 182.393, "cuda_time_us": 16.864, "pct_cuda_time": 0.2432080853997272, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.864, "pct_cuda_time": 0.2432080853997272, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.1, "cuda_time_us": 3.137, "pct_cuda_time": 0.045240972716967746, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.137, "pct_cuda_time": 0.045240972716967746, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 494.232, "cuda_time_us": 135.038, "pct_cuda_time": 1.9474818214070422, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 148.96, "cuda_time_us": 81.791, "pct_cuda_time": 1.1795678672277683, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.791, "pct_cuda_time": 1.1795678672277683, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.118, "cuda_time_us": 9.44, "pct_cuda_time": 0.1361411483736613, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.44, "pct_cuda_time": 0.1361411483736613, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 194.702, "cuda_time_us": 43.807, "pct_cuda_time": 0.6317728058056125, 
"trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.807, "pct_cuda_time": 0.6317728058056125, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2578.856, "cuda_time_us": 201.46800000000002, "pct_cuda_time": 2.9055174661594068, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.323, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1749.575, "cuda_time_us": 60.35000000000001, "pct_cuda_time": 0.8703515152913625, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.462, "cuda_time_us": 21.76, "pct_cuda_time": 0.31381688438674477, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.76, "pct_cuda_time": 0.31381688438674477, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 483.706, "cuda_time_us": 3.679, "pct_cuda_time": 0.053057551362997876, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.679, "pct_cuda_time": 0.053057551362997876, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 792.974, "cuda_time_us": 17.919, "pct_cuda_time": 0.2584230124690294, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03691963345726408, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, 
true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.887, "pct_cuda_time": 0.20027458977383844, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02122878923792685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.373, "cuda_time_us": 16.992, "pct_cuda_time": 0.2450540670725904, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.992, "pct_cuda_time": 0.2450540670725904, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.225, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 605.708, "cuda_time_us": 134.814, "pct_cuda_time": 1.9442513534795314, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 142.712, "cuda_time_us": 81.791, "pct_cuda_time": 
1.1795678672277683, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.791, "pct_cuda_time": 1.1795678672277683, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.066, "cuda_time_us": 9.344, "pct_cuda_time": 0.13475666211901388, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.344, "pct_cuda_time": 0.13475666211901388, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 312.428, "cuda_time_us": 43.679, "pct_cuda_time": 0.6299268241327493, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.679, "pct_cuda_time": 0.6299268241327493, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2376.654, "cuda_time_us": 200.413, "pct_cuda_time": 2.8903025390901043, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.426, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1690.918, "cuda_time_us": 59.551, "pct_cuda_time": 0.8588285515677866, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.912, "cuda_time_us": 21.728, "pct_cuda_time": 0.31335538896852894, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.728, "pct_cuda_time": 0.31335538896852894, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 466.217, "cuda_time_us": 3.648, "pct_cuda_time": 0.052610477676601326, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.052610477676601326, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { 
"name": "Attention", "cpu_time_us": 738.732, "cuda_time_us": 18.144000000000002, "pct_cuda_time": 0.2616679021283592, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.035996642620832485, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.208, "pct_cuda_time": 0.2049039656878157, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.020767293819711048, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 199.007, "cuda_time_us": 16.031, "pct_cuda_time": 0.23119478279429706, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.031, "pct_cuda_time": 
0.23119478279429706, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.721, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 447.583, "cuda_time_us": 134.59, "pct_cuda_time": 1.9410208855520208, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 148.884, "cuda_time_us": 81.375, "pct_cuda_time": 1.1735684267909627, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.375, "pct_cuda_time": 1.1735684267909627, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.19, "cuda_time_us": 9.408, "pct_cuda_time": 0.13567965295544548, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.408, "pct_cuda_time": 0.13567965295544548, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.143, "cuda_time_us": 43.807, "pct_cuda_time": 0.6317728058056125, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.807, "pct_cuda_time": 0.6317728058056125, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2304.747, "cuda_time_us": 201.56400000000002, "pct_cuda_time": 2.9069019524140542, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.422, "cuda_time_us": 3.072, "pct_cuda_time": 0.044303560148716906, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044303560148716906, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1618.942, "cuda_time_us": 60.415, "pct_cuda_time": 0.8712889278596132, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", 
"cpu_time_us": 138.451, "cuda_time_us": 22.335, "pct_cuda_time": 0.32210938018280993, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.335, "pct_cuda_time": 0.32210938018280993, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 480.911, "cuda_time_us": 3.808, "pct_cuda_time": 0.05491795476768032, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05491795476768032, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 675.323, "cuda_time_us": 17.952, "pct_cuda_time": 0.2588989296190644, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.03553514720261668, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.208, "pct_cuda_time": 0.2049039656878157, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, 
at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01845981672863204, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 173.223, "cuda_time_us": 16.32, "pct_cuda_time": 0.23536266329005853, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.32, "pct_cuda_time": 0.23536266329005853, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.563, "cuda_time_us": 3.232, "pct_cuda_time": 0.046611037239795906, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.046611037239795906, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 458.383, "cuda_time_us": 134.84500000000003, "pct_cuda_time": 1.9446984271659282, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 148.039, "cuda_time_us": 81.95, "pct_cuda_time": 1.181860922587028, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.95, "pct_cuda_time": 1.181860922587028, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.668, "cuda_time_us": 8.832, "pct_cuda_time": 0.1273727354275611, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.832, "pct_cuda_time": 0.1273727354275611, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 137.488, "cuda_time_us": 44.063, "pct_cuda_time": 0.6354647691513389, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.063, "pct_cuda_time": 0.6354647691513389, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], 
bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2312.394, "cuda_time_us": 201.183, "pct_cuda_time": 2.901407272590922, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.464, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1646.226, "cuda_time_us": 60.25600000000001, "pct_cuda_time": 0.8689958725003536, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.001, "cuda_time_us": 21.792, "pct_cuda_time": 0.31427837980496054, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.792, "pct_cuda_time": 0.31427837980496054, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 480.635, "cuda_time_us": 3.968, "pct_cuda_time": 0.05722543185875933, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.968, "pct_cuda_time": 0.05722543185875933, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 695.016, "cuda_time_us": 17.92, "pct_cuda_time": 0.25843743420084864, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.72, "pct_cuda_time": 0.039227110548343096, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, 
cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.92, "pct_cuda_time": 0.20075050692387347, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01845981672863204, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 195.761, "cuda_time_us": 16.576, "pct_cuda_time": 0.23905462663578497, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.576, "pct_cuda_time": 0.23905462663578497, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.018, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 439.002, "cuda_time_us": 134.655, "pct_cuda_time": 1.9419582981202717, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 146.244, "cuda_time_us": 81.631, "pct_cuda_time": 1.1772603901366891, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.631, "pct_cuda_time": 1.1772603901366891, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- 
matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.337, "cuda_time_us": 8.928, "pct_cuda_time": 0.12875722168220852, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.12875722168220852, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 140.801, "cuda_time_us": 44.096, "pct_cuda_time": 0.6359406863013738, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.096, "pct_cuda_time": 0.6359406863013738, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2419.812, "cuda_time_us": 200.894, "pct_cuda_time": 2.8972393920951607, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.681, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1752.114, "cuda_time_us": 60.223, "pct_cuda_time": 0.8685199553503183, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 142.741, "cuda_time_us": 21.92, "pct_cuda_time": 0.31612436147782375, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.92, "pct_cuda_time": 0.31612436147782375, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 525.224, "cuda_time_us": 3.775, "pct_cuda_time": 0.05444203761764528, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.775, "pct_cuda_time": 0.05444203761764528, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 733.396, "cuda_time_us": 18.176000000000002, "pct_cuda_time": 0.262129397546575, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, 
__nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.816, "pct_cuda_time": 0.04061159680299049, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.856, "pct_cuda_time": 0.19982751608744187, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.02169028465614265, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 200.779, "cuda_time_us": 16.352, "pct_cuda_time": 0.23582415870827436, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.352, "pct_cuda_time": 0.23582415870827436, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.408, "cuda_time_us": 3.104, "pct_cuda_time": 
0.0447650555669327, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 433.368, "cuda_time_us": 134.399, "pct_cuda_time": 1.9382663347745452, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 146.514, "cuda_time_us": 81.567, "pct_cuda_time": 1.1763373993002575, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.567, "pct_cuda_time": 1.1763373993002575, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.146, "cuda_time_us": 8.992, "pct_cuda_time": 0.12968021251864012, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.12968021251864012, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.122, "cuda_time_us": 43.84, "pct_cuda_time": 0.6322487229556475, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.84, "pct_cuda_time": 0.6322487229556475, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2283.505, "cuda_time_us": 200.63700000000003, "pct_cuda_time": 2.893533007017615, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.194, "cuda_time_us": 3.2, "pct_cuda_time": 0.04614954182158011, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04614954182158011, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1630.425, "cuda_time_us": 59.968, "pct_cuda_time": 0.8648424137364111, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.396, "cuda_time_us": 21.76, "pct_cuda_time": 0.31381688438674477, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.76, "pct_cuda_time": 0.31381688438674477, 
"trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 478.952, "cuda_time_us": 3.68, "pct_cuda_time": 0.05307197309481713, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05307197309481713, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 679.245, "cuda_time_us": 18.496000000000002, "pct_cuda_time": 0.26674435172873306, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.03553514720261668, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.336, "pct_cuda_time": 0.20674994736067887, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.696, "pct_cuda_time": 0.024459257165437457, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, 
bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 182.099, "cuda_time_us": 16.032, "pct_cuda_time": 0.23120920452611632, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.032, "pct_cuda_time": 0.23120920452611632, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.58, "cuda_time_us": 3.072, "pct_cuda_time": 0.044303560148716906, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044303560148716906, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 431.433, "cuda_time_us": 134.39700000000002, "pct_cuda_time": 1.9382374913109068, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 145.208, "cuda_time_us": 81.79, "pct_cuda_time": 1.179553445495949, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.79, "pct_cuda_time": 1.179553445495949, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.066, "cuda_time_us": 8.864, "pct_cuda_time": 0.1278342308457769, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.864, "pct_cuda_time": 0.1278342308457769, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.303, "cuda_time_us": 43.743, "pct_cuda_time": 0.6308498149691808, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.743, "pct_cuda_time": 0.6308498149691808, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2334.514, "cuda_time_us": 201.75900000000001, "pct_cuda_time": 2.9097141901188066, "trace": "" }, "children": [ { "entry": { "name": 
"RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.778, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1681.688, "cuda_time_us": 59.713, "pct_cuda_time": 0.8611648721225041, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.878, "cuda_time_us": 21.664, "pct_cuda_time": 0.31243239813209733, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.664, "pct_cuda_time": 0.31243239813209733, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 508.895, "cuda_time_us": 3.84, "pct_cuda_time": 0.05537945018589612, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.05537945018589612, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 708.925, "cuda_time_us": 18.081, "pct_cuda_time": 0.2607593330237469, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.592, "pct_cuda_time": 0.03738112887547989, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.176, "pct_cuda_time": 0.20444247026959986, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.313, "pct_cuda_time": 0.018935733878667087, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 174.763, "cuda_time_us": 16.128, "pct_cuda_time": 0.23259369078076375, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.128, "pct_cuda_time": 0.23259369078076375, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.562, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 432.061, "cuda_time_us": 135.774, "pct_cuda_time": 1.9580962160260056, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 148.327, "cuda_time_us": 82.143, "pct_cuda_time": 1.184644316828142, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.143, "pct_cuda_time": 1.184644316828142, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.909, "cuda_time_us": 9.056, "pct_cuda_time": 0.1306032033550717, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.056, "pct_cuda_time": 0.1306032033550717, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.29, "cuda_time_us": 44.575, "pct_cuda_time": 0.6428486958427917, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.575, "pct_cuda_time": 0.6428486958427917, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2246.045, "cuda_time_us": 200.926, "pct_cuda_time": 2.897700887513376, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.799, "cuda_time_us": 3.072, "pct_cuda_time": 0.044303560148716906, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044303560148716906, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1602.955, "cuda_time_us": 59.743, "pct_cuda_time": 0.8615975240770815, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.349, "cuda_time_us": 21.759, "pct_cuda_time": 0.3138024626549255, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.759, "pct_cuda_time": 0.3138024626549255, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 477.18, "cuda_time_us": 3.776, "pct_cuda_time": 0.054456459349464525, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.054456459349464525, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 669.593, "cuda_time_us": 18.144000000000002, "pct_cuda_time": 0.2616679021283592, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.036458138039048286, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 
128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.336, "pct_cuda_time": 0.20674994736067887, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01845981672863204, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 169.416, "cuda_time_us": 16.064, "pct_cuda_time": 0.23167069994433215, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.064, "pct_cuda_time": 0.23167069994433215, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.453, "cuda_time_us": 3.265, "pct_cuda_time": 0.04708695438983095, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.265, "pct_cuda_time": 
0.04708695438983095, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 427.008, "cuda_time_us": 134.846, "pct_cuda_time": 1.944712848897747, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 147.472, "cuda_time_us": 81.407, "pct_cuda_time": 1.1740299222091786, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.407, "pct_cuda_time": 1.1740299222091786, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 92.25, "cuda_time_us": 9.152, "pct_cuda_time": 0.1319876896097191, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.152, "pct_cuda_time": 0.1319876896097191, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 134.884, "cuda_time_us": 44.287, "pct_cuda_time": 0.6386952370788495, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.287, "pct_cuda_time": 0.6386952370788495, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2694.602, "cuda_time_us": 200.735, "pct_cuda_time": 2.894946336735901, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.148, "cuda_time_us": 3.264, "pct_cuda_time": 0.0470725326580117, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.264, "pct_cuda_time": 0.0470725326580117, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2001.478, "cuda_time_us": 60.480000000000004, "pct_cuda_time": 0.8722263404278641, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 133.825, "cuda_time_us": 22.048, "pct_cuda_time": 0.3179703431506869, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.048, "pct_cuda_time": 0.3179703431506869, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 568.298, "cuda_time_us": 3.68, "pct_cuda_time": 
0.05307197309481713, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05307197309481713, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 940.098, "cuda_time_us": 18.016000000000002, "pct_cuda_time": 0.259821920455496, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.036458138039048286, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.016, "pct_cuda_time": 0.20213499317852085, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02122878923792685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], 
bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 215.498, "cuda_time_us": 16.736, "pct_cuda_time": 0.24136210372686395, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.736, "pct_cuda_time": 0.24136210372686395, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 94.666, "cuda_time_us": 3.2, "pct_cuda_time": 0.04614954182158011, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04614954182158011, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 453.545, "cuda_time_us": 133.791, "pct_cuda_time": 1.929497921828445, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 151.339, "cuda_time_us": 80.736, "pct_cuda_time": 1.164352940158466, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.736, "pct_cuda_time": 1.164352940158466, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.046, "cuda_time_us": 8.928, "pct_cuda_time": 0.12875722168220852, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.12875722168220852, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.589, "cuda_time_us": 44.127, "pct_cuda_time": 0.6363877599877704, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.127, "pct_cuda_time": 0.6363877599877704, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2334.38, "cuda_time_us": 200.798, "pct_cuda_time": 2.895854905840513, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.466, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", 
"cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1659.81, "cuda_time_us": 60.033, "pct_cuda_time": 0.8657798263046621, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.312, "cuda_time_us": 21.632, "pct_cuda_time": 0.31197090271388156, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.632, "pct_cuda_time": 0.31197090271388156, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 487.107, "cuda_time_us": 3.68, "pct_cuda_time": 0.05307197309481713, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05307197309481713, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 683.699, "cuda_time_us": 18.305, "pct_cuda_time": 0.2639898009512574, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.432, "pct_cuda_time": 0.03507365178440088, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.401, "pct_cuda_time": 0.20768735992892973, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, 
int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02122878923792685, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 192.468, "cuda_time_us": 16.416, "pct_cuda_time": 0.23674714954470596, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.416, "pct_cuda_time": 0.23674714954470596, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.363, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.0447650555669327, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 440.908, "cuda_time_us": 134.493, "pct_cuda_time": 1.9396219775655539, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 146.958, "cuda_time_us": 81.054, "pct_cuda_time": 1.1689390508769857, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.054, "pct_cuda_time": 1.1689390508769857, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.889, "cuda_time_us": 9.152, "pct_cuda_time": 0.1319876896097191, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.152, "pct_cuda_time": 0.1319876896097191, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 
137.87, "cuda_time_us": 44.287, "pct_cuda_time": 0.6386952370788495, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.287, "pct_cuda_time": 0.6386952370788495, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2378.262, "cuda_time_us": 201.02100000000002, "pct_cuda_time": 2.899070952036205, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.218, "cuda_time_us": 3.04, "pct_cuda_time": 0.0438420647305011, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.0438420647305011, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1711.637, "cuda_time_us": 60.637, "pct_cuda_time": 0.8744905523234852, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.069, "cuda_time_us": 22.303, "pct_cuda_time": 0.3216478847645941, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.303, "pct_cuda_time": 0.3216478847645941, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 480.634, "cuda_time_us": 4.192, "pct_cuda_time": 0.060455899786269945, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.192, "pct_cuda_time": 0.060455899786269945, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 691.866, "cuda_time_us": 17.919, "pct_cuda_time": 0.2584230124690294, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.03553514720261668, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, 
cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.175, "pct_cuda_time": 0.20442804853778063, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01845981672863204, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 251.678, "cuda_time_us": 16.223, "pct_cuda_time": 0.23396375530359187, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.223, "pct_cuda_time": 0.23396375530359187, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.291, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 430.581, "cuda_time_us": 134.17600000000002, "pct_cuda_time": 1.935050288578854, "trace": "" }, "children": [ { "entry": { "name": 
"MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 143.875, "cuda_time_us": 80.768, "pct_cuda_time": 1.1648144355766819, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.768, "pct_cuda_time": 1.1648144355766819, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.945, "cuda_time_us": 8.96, "pct_cuda_time": 0.12921871710042432, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.96, "pct_cuda_time": 0.12921871710042432, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.487, "cuda_time_us": 44.448, "pct_cuda_time": 0.6410171359017477, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.448, "pct_cuda_time": 0.6410171359017477, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2399.662, "cuda_time_us": 201.022, "pct_cuda_time": 2.8990853737680236, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.766, "cuda_time_us": 3.072, "pct_cuda_time": 0.044303560148716906, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044303560148716906, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1734.389, "cuda_time_us": 59.839, "pct_cuda_time": 0.8629820103317287, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.889, "cuda_time_us": 21.952, "pct_cuda_time": 0.3165858568960395, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.952, "pct_cuda_time": 0.3165858568960395, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 505.506, "cuda_time_us": 3.776, "pct_cuda_time": 0.054456459349464525, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.054456459349464525, "trace": 
"_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 755.85, "cuda_time_us": 18.048000000000002, "pct_cuda_time": 0.2602834158737118, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.036458138039048286, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 14.208, "pct_cuda_time": 0.2049039656878157, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.018921312146847842, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 192.838, "cuda_time_us": 16.063, "pct_cuda_time": 0.2316562782125129, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.063, "pct_cuda_time": 0.2316562782125129, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.328, "cuda_time_us": 3.2, "pct_cuda_time": 0.04614954182158011, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04614954182158011, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 444.075, "cuda_time_us": 134.911, "pct_cuda_time": 1.945650261465998, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 151.17, "cuda_time_us": 81.727, "pct_cuda_time": 1.1786448763913366, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.727, "pct_cuda_time": 1.1786448763913366, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.614, "cuda_time_us": 9.28, "pct_cuda_time": 0.1338336712825823, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.28, "pct_cuda_time": 0.1338336712825823, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.556, "cuda_time_us": 43.904, "pct_cuda_time": 0.633171713792079, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.904, "pct_cuda_time": 0.633171713792079, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2281.576, "cuda_time_us": 200.37900000000002, "pct_cuda_time": 2.8898122002082505, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.363, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 
1641.366, "cuda_time_us": 60.126000000000005, "pct_cuda_time": 0.8671210473638518, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.053, "cuda_time_us": 21.599, "pct_cuda_time": 0.31149498556384647, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.599, "pct_cuda_time": 0.31149498556384647, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[14, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 491.566, "cuda_time_us": 3.68, "pct_cuda_time": 0.05307197309481713, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05307197309481713, "trace": "_C::rotary_embedding(int64[14], bfloat16[14, 4096], bfloat16[14, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 687.849, "cuda_time_us": 17.855, "pct_cuda_time": 0.2575000216325978, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.035996642620832485, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[14], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 13.855, "pct_cuda_time": 0.1998130943556226, "trace": "_vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 
128], bfloat16[14, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.02169028465614265, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[14, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[14, 1, 32, 128], None, None, None, None, int32[14], None, None, int32[14, 17], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[14, 32, 128], bfloat16[14, 8, 128], bfloat16[14, 8, 128], bfloat16[14, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 172.486, "cuda_time_us": 16.992, "pct_cuda_time": 0.2450540670725904, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.992, "pct_cuda_time": 0.2450540670725904, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[14, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.543, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.0456880464033643, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 422.648, "cuda_time_us": 133.917, "pct_cuda_time": 1.9313150600376698, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 139.165, "cuda_time_us": 81.279, "pct_cuda_time": 1.1721839405363155, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.279, "pct_cuda_time": 1.1721839405363155, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[14, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 92.65, "cuda_time_us": 8.799, "pct_cuda_time": 0.12689681827752602, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.799, "pct_cuda_time": 0.12689681827752602, "trace": "_C::silu_and_mul(bfloat16[14, 14336], bfloat16[14, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 137.372, "cuda_time_us": 43.839, "pct_cuda_time": 0.6322343012238282, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", 
"cpu_time_us": 0, "cuda_time_us": 43.839, "pct_cuda_time": 0.6322343012238282, "trace": "mm(bfloat16[14, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[14, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[14, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.73, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04522655098514851, "trace": "_C::fused_add_rms_norm(bfloat16[14, 4096], bfloat16[14, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 503.87, "cuda_time_us": 350.491, "pct_cuda_time": 5.054687207058572, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 8.032, "pct_cuda_time": 0.11583534997216607, "trace": "index_select(bfloat16[14, 4096], 0, int64[14])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010614394618963425, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[14, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 341.723, "pct_cuda_time": 4.928237462467443, "trace": "mm(bfloat16[14, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[14, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[14, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 3942.61, "cuda_time_us": 127.678, "pct_cuda_time": 1.8413378752174077, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010614394618963425, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010614394618963425, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.011061468305359982, "trace": "copy_(int32[14], int32[14], True) <- _to_copy(int32[14], 3, 0, None, None, True, None) <- to(int32[14], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.011075890037179226, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD 
(Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.011537385455395028, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.011537385455395028, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.011075890037179226, "trace": "copy_(bfloat16[14], bfloat16[14], True) <- _to_copy(bfloat16[14], 15, 0, None, None, True, None) <- to(bfloat16[14], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 6.976, "pct_cuda_time": 0.10060600117104462, "trace": "copy_(float32[14, 128256], bfloat16[14, 128256], False) <- _to_copy(bfloat16[14, 128256], 6, None, None, None, False, None) <- to(bfloat16[14, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 9.728, "pct_cuda_time": 0.14029460713760353, "trace": "div_(float32[14, 128256], bfloat16[14, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 34.943, "pct_cuda_time": 0.5039385749598355, "trace": "_softmax(float32[14, 128256], -1, False) <- softmax(float32[14, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 28.223, "pct_cuda_time": 0.40702453713451725, "trace": "_log_softmax(float32[14, 128256], -1, False) <- log_softmax(float32[14, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, 
at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 1.696, "pct_cuda_time": 0.024459257165437457, "trace": "copy_(int64[14], int32[14], False) <- _to_copy(int32[14], 4, None, None, None, False, None) <- to(int32[14], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 10.176, "pct_cuda_time": 0.14675554299262472, "trace": "index(float32[14, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 27.936, "pct_cuda_time": 0.4028855001023944, "trace": "argmax(float32[14, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 2.625, "pct_cuda_time": 0.03785704602551493, "trace": "copy_(int64[14], int64[14], False) <- _to_copy(int64[14], 4, 0, None, None, False, None) <- to(int64[14], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] } }