{ "context": { "python_version": "3.12.9 | packaged by Anaconda, Inc. | (main, Feb 6 2025, 18:56:27) [GCC 11.2.0]", "torch_version": "2.5.1+cu124", "engine_args": { "model": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "served_model_name": null, "tokenizer": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "task": "auto", "skip_tokenizer_init": false, "tokenizer_mode": "auto", "trust_remote_code": false, "allowed_local_media_path": null, "download_dir": null, "load_format": "dummy", "config_format": "auto", "dtype": "auto", "kv_cache_dtype": "auto", "seed": 0, "max_model_len": null, "distributed_executor_backend": null, "pipeline_parallel_size": 1, "tensor_parallel_size": 1, "max_parallel_loading_workers": null, "block_size": null, "enable_prefix_caching": false, "disable_sliding_window": false, "use_v2_block_manager": true, "swap_space": 4, "cpu_offload_gb": 0, "gpu_memory_utilization": 0.9, "max_num_batched_tokens": 8000, "max_num_partial_prefills": 1, "max_long_partial_prefills": 1, "long_prefill_token_threshold": 0, "max_num_seqs": 256, "max_logprobs": 20, "disable_log_stats": false, "revision": null, "code_revision": null, "rope_scaling": null, "rope_theta": null, "hf_overrides": null, "tokenizer_revision": null, "quantization": null, "enforce_eager": true, "max_seq_len_to_capture": 8192, "disable_custom_all_reduce": false, "tokenizer_pool_size": 0, "tokenizer_pool_type": "ray", "tokenizer_pool_extra_config": null, "limit_mm_per_prompt": null, "mm_processor_kwargs": null, "disable_mm_preprocessor_cache": false, "enable_lora": false, "enable_lora_bias": false, "max_loras": 1, "max_lora_rank": 16, "enable_prompt_adapter": false, "max_prompt_adapters": 1, "max_prompt_adapter_token": 0, "fully_sharded_loras": false, "lora_extra_vocab_size": 256, "long_lora_scaling_factors": null, "lora_dtype": "auto", "max_cpu_loras": null, "device": "auto", "num_scheduler_steps": 1, "multi_step_stream_outputs": true, "ray_workers_use_nsight": false, "num_gpu_blocks_override": null, "num_lookahead_slots": 0, "model_loader_extra_config": null, "ignore_patterns": [], "preemption_mode": null, "scheduler_delay_factor": 0.0, "enable_chunked_prefill": null, "guided_decoding_backend": "xgrammar", "logits_processor_pattern": null, "speculative_model": null, "speculative_model_quantization": null, "speculative_draft_tensor_parallel_size": null, "num_speculative_tokens": null, "speculative_disable_mqa_scorer": false, "speculative_max_model_len": null, "speculative_disable_by_batch_size": null, "ngram_prompt_lookup_max": null, "ngram_prompt_lookup_min": null, "spec_decoding_acceptance_method": "rejection_sampler", "typical_acceptance_sampler_posterior_threshold": null, "typical_acceptance_sampler_posterior_alpha": null, "qlora_adapter_name_or_path": null, "disable_logprobs_during_spec_decoding": null, "otlp_traces_endpoint": null, "collect_detailed_traces": null, "disable_async_output_proc": false, "scheduling_policy": "fcfs", "scheduler_cls": "vllm.core.scheduler.Scheduler", "override_neuron_config": null, "override_pooler_config": null, "compilation_config": null, "worker_cls": "auto", "kv_transfer_config": null, "generation_config": null, "override_generation_config": null, "enable_sleep_mode": false, "model_impl": "auto", "calculate_kv_scales": false, "additional_config": null }, "prompt_len": 0, "batch_size": 8, "num_steps": 2, "complete_num_requests_per_step": null, "save_chrome_traces_folder": null }, "prefill": { "metadata": { "num_running_seqs": null }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", 
"cuda_time_us": 22705.202, "pct_cuda_time": 97.916491021584, "invocations": 1 }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 30.112, "pct_cuda_time": 0.12985840767423856, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cuda_time_us": 30.112, "pct_cuda_time": 0.12985840767423856, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 22664.882, "pct_cuda_time": 97.74261047570776, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 642.487, "pct_cuda_time": 2.7707338858726924, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 13.184, "pct_cuda_time": 0.05685617849286535, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 629.303, "pct_cuda_time": 2.7138777073798273, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 5445.852000000001, "pct_cuda_time": 23.485310479196585, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 2790.555, "pct_cuda_time": 12.034306217700081, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 23.677000000000007, "pct_cuda_time": 0.1021073830533657, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 2766.8780000000006, "pct_cuda_time": 11.932198834646718, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 417.02, "pct_cuda_time": 1.7984043958657998, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 417.02, "pct_cuda_time": 1.7984043958657998, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 725.4350000000001, "pct_cuda_time": 3.1284482588722526, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 184.25600000000003, "pct_cuda_time": 0.794606494567764, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > 
> >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 496.3429999999999, "pct_cuda_time": 2.1404859072879447, "invocations": 32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 44.836000000000006, "pct_cuda_time": 0.19335585701654362, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 1512.8419999999999, "pct_cuda_time": 6.5241516067584495, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 1512.8419999999999, "pct_cuda_time": 6.5241516067584495, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 16576.543, "pct_cuda_time": 71.48656611063848, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 10330.481999999996, "pct_cuda_time": 44.550343485234556, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 23.58400000000001, "pct_cuda_time": 0.10170631929427618, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 10306.898000000001, "pct_cuda_time": 44.448637165940305, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 1457.7400000000002, "pct_cuda_time": 6.2865234857546675, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 1457.7400000000002, "pct_cuda_time": 6.2865234857546675, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 4788.321000000001, "pct_cuda_time": 20.64969913964924, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 4788.321000000001, "pct_cuda_time": 20.64969913964924, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 10.208, "pct_cuda_time": 0.04402213820200011, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 10.208, "pct_cuda_time": 0.04402213820200011, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 363.13100000000003, "pct_cuda_time": 1.5660073537843364, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous 
namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 5.6, "pct_cuda_time": 0.02415007581614426, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "invocations": 1 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 356.795, "pct_cuda_time": 1.5386832680037843, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 120.00000000000001, "pct_cuda_time": 0.5175016246316628, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 5.4079999999999995, "pct_cuda_time": 0.023322073216733602, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 4.736, "pct_cuda_time": 0.02042406411879629, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 6.432, "pct_cuda_time": 0.027738087080257125, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 35.2, "pct_cuda_time": 0.15180047655862108, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 28.128, "pct_cuda_time": 0.12130238081366175, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 1.92, "pct_cuda_time": 0.008280025994106604, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl 
>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cuda_time_us": 7.296, "pct_cuda_time": 0.0314640987776051, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 27.712, "pct_cuda_time": 0.11950837518160531, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 3.168, "pct_cuda_time": 0.013662042890275899, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 81496.168, "cuda_time_us": 22705.202, "pct_cuda_time": 97.916491021584, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 337.977, "cuda_time_us": 30.112, "pct_cuda_time": 0.12985840767423856, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 30.112, "pct_cuda_time": 0.12985840767423856, "trace": "index_select(bfloat16[128256, 4096], 0, int64[1024]) <- embedding(bfloat16[128256, 4096], int64[1024], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 3986.126, "cuda_time_us": 711.4780000000001, "pct_cuda_time": 3.068258507414052, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 231.219, "cuda_time_us": 13.184, "pct_cuda_time": 0.05685617849286535, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.184, "pct_cuda_time": 0.05685617849286535, "trace": "_C::rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2925.774, "cuda_time_us": 170.75, "pct_cuda_time": 0.7363616867154701, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 413.432, "cuda_time_us": 88.159, "pct_cuda_time": 0.3801868810491897, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 87.423, "pct_cuda_time": 0.37701287108478215, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { 
"name": "Llama3RotaryEmbedding", "cpu_time_us": 967.397, "cuda_time_us": 12.736, "pct_cuda_time": 0.05492417242757382, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 12.736, "pct_cuda_time": 0.05492417242757382, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1033.369, "cuda_time_us": 22.4, "pct_cuda_time": 0.09660030326457704, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.664, "pct_cuda_time": 0.024426076682614484, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.456, "pct_cuda_time": 0.06665420925255816, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.005520017329404403, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 272.213, "cuda_time_us": 47.455, "pct_cuda_time": 0.20465032997412963, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.455, "pct_cuda_time": 0.20465032997412963, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 108.724, "cuda_time_us": 10.112, "pct_cuda_time": 0.043608136902294786, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.112, "pct_cuda_time": 0.043608136902294786, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 604.931, "cuda_time_us": 517.432, "pct_cuda_time": 2.231432505303421, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 194.58, "cuda_time_us": 320.667, "pct_cuda_time": 1.3828807788813449, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0033120103976426417, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 319.899, "pct_cuda_time": 1.3795687684837026, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 142.76, "cuda_time_us": 45.855, "pct_cuda_time": 0.1977503083123741, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.855, "pct_cuda_time": 0.1977503083123741, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 198.104, "cuda_time_us": 150.91, "pct_cuda_time": 0.6508014181097019, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.91, "pct_cuda_time": 0.6508014181097019, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", 
"cpu_time_us": 2571.233, "cuda_time_us": 708.246, "pct_cuda_time": 3.054320463657305, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.772, "cuda_time_us": 9.984, "pct_cuda_time": 0.04305613516935434, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.984, "pct_cuda_time": 0.04305613516935434, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1830.217, "cuda_time_us": 170.236, "pct_cuda_time": 0.7341450547566312, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 160.708, "cuda_time_us": 87.61500000000001, "pct_cuda_time": 0.37784087368419284, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.879, "pct_cuda_time": 0.3746668637197853, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 528.413, "cuda_time_us": 12.672, "pct_cuda_time": 0.054648171561103596, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 12.672, "pct_cuda_time": 0.054648171561103596, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 799.426, "cuda_time_us": 22.654, "pct_cuda_time": 0.09769568170338074, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.727, "pct_cuda_time": 0.024697765035546104, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, 
cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.423, "pct_cuda_time": 0.06651189630578445, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.006486020362050174, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 187.344, "cuda_time_us": 47.295, "pct_cuda_time": 0.2039603278079541, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.295, "pct_cuda_time": 0.2039603278079541, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.118, "cuda_time_us": 9.92, "pct_cuda_time": 0.042780134302884125, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.92, "pct_cuda_time": 0.042780134302884125, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 500.185, "cuda_time_us": 518.106, "pct_cuda_time": 2.2343391394284358, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.553, "cuda_time_us": 322.972, "pct_cuda_time": 1.3928211225878113, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 
0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.236, "pct_cuda_time": 1.389647112623404, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.402, "cuda_time_us": 45.6, "pct_cuda_time": 0.19665061736003184, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.6, "pct_cuda_time": 0.19665061736003184, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 177.88, "cuda_time_us": 149.534, "pct_cuda_time": 0.6448673994805921, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.534, "pct_cuda_time": 0.6448673994805921, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2530.596, "cuda_time_us": 708.2479999999999, "pct_cuda_time": 3.054329088684382, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.964, "cuda_time_us": 9.984, "pct_cuda_time": 0.04305613516935434, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.984, "pct_cuda_time": 0.04305613516935434, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1796.101, "cuda_time_us": 170.911, "pct_cuda_time": 0.7370560013951843, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.364, "cuda_time_us": 87.423, "pct_cuda_time": 0.37701287108478215, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.687, "pct_cuda_time": 0.37383886112037457, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": 
"Llama3RotaryEmbedding", "cpu_time_us": 529.777, "cuda_time_us": 13.344, "pct_cuda_time": 0.0575461806590409, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.344, "pct_cuda_time": 0.0575461806590409, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 781.038, "cuda_time_us": 22.721, "pct_cuda_time": 0.09798462011046676, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.697, "pct_cuda_time": 0.02456838962938819, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.552, "pct_cuda_time": 0.06706821055226349, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.006348019928815063, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 181.543, "cuda_time_us": 47.423, "pct_cuda_time": 0.20451232954089454, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.423, "pct_cuda_time": 0.20451232954089454, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.93, "cuda_time_us": 10.048, "pct_cuda_time": 0.043332136035824566, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.048, "pct_cuda_time": 0.043332136035824566, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 480.399, "cuda_time_us": 517.305, "pct_cuda_time": 2.230884816084019, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 184.888, "cuda_time_us": 322.364, "pct_cuda_time": 1.3901991143563444, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 321.628, "pct_cuda_time": 1.3870251043919368, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.653, "cuda_time_us": 45.183, "pct_cuda_time": 0.19485229921443684, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.183, "pct_cuda_time": 0.19485229921443684, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.162, "cuda_time_us": 149.758, "pct_cuda_time": 0.6458334025132381, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.758, "pct_cuda_time": 0.6458334025132381, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", 
"cpu_time_us": 2425.071, "cuda_time_us": 707.7690000000001, "pct_cuda_time": 3.052263394699395, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.838, "cuda_time_us": 10.208, "pct_cuda_time": 0.04402213820200011, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.208, "pct_cuda_time": 0.04402213820200011, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1735.42, "cuda_time_us": 170.01500000000001, "pct_cuda_time": 0.7331919892646013, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 148.542, "cuda_time_us": 87.295, "pct_cuda_time": 0.3764608693518417, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0033120103976426417, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.527, "pct_cuda_time": 0.3731488589541991, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 506.928, "cuda_time_us": 13.056, "pct_cuda_time": 0.056304176759924905, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.056, "pct_cuda_time": 0.056304176759924905, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 738.892, "cuda_time_us": 22.496000000000002, "pct_cuda_time": 0.0970143045642824, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.76, "pct_cuda_time": 0.02484007798231981, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, 
cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.456, "pct_cuda_time": 0.06665420925255816, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.005520017329404403, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 199.654, "cuda_time_us": 47.168, "pct_cuda_time": 0.20341263858855227, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.168, "pct_cuda_time": 0.20341263858855227, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.78, "cuda_time_us": 9.953, "pct_cuda_time": 0.04292244724965783, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.953, "pct_cuda_time": 0.04292244724965783, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 463.455, "cuda_time_us": 517.5930000000001, "pct_cuda_time": 2.2321268199831357, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.84, "cuda_time_us": 322.236, "pct_cuda_time": 1.389647112623404, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, 
"cuda_time_us": 0.704, "pct_cuda_time": 0.0030360095311724217, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 321.532, "pct_cuda_time": 1.3866111030922315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 104.309, "cuda_time_us": 45.759, "pct_cuda_time": 0.1973363070126688, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.759, "pct_cuda_time": 0.1973363070126688, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.217, "cuda_time_us": 149.598, "pct_cuda_time": 0.6451434003470624, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.598, "pct_cuda_time": 0.6451434003470624, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2439.352, "cuda_time_us": 707.221, "pct_cuda_time": 3.0499001372802432, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.011, "cuda_time_us": 10.112, "pct_cuda_time": 0.043608136902294786, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.112, "pct_cuda_time": 0.043608136902294786, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1774.793, "cuda_time_us": 169.373, "pct_cuda_time": 0.7304233555728218, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 167.265, "cuda_time_us": 86.558, "pct_cuda_time": 0.3732825468738956, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.822, "pct_cuda_time": 0.37010853690948803, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] 
} ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 519.633, "cuda_time_us": 13.216, "pct_cuda_time": 0.05699417892610045, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.216, "pct_cuda_time": 0.05699417892610045, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 740.097, "cuda_time_us": 22.464000000000002, "pct_cuda_time": 0.09687630413104728, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.728, "pct_cuda_time": 0.024702077549084704, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.424, "pct_cuda_time": 0.06651620881932306, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.005658017762639513, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) 
<- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 176.471, "cuda_time_us": 47.135, "pct_cuda_time": 0.20327032564177852, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.135, "pct_cuda_time": 0.20327032564177852, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.384, "cuda_time_us": 9.888, "pct_cuda_time": 0.04264213386964901, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.888, "pct_cuda_time": 0.04264213386964901, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 447.287, "cuda_time_us": 517.848, "pct_cuda_time": 2.233226510935477, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.323, "cuda_time_us": 322.491, "pct_cuda_time": 1.3907468035757462, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 321.755, "pct_cuda_time": 1.3875727936113387, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.769, "cuda_time_us": 45.952, "pct_cuda_time": 0.1981686221256181, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.952, "pct_cuda_time": 0.1981686221256181, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.018, "cuda_time_us": 149.405, "pct_cuda_time": 0.6443110852341132, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.405, "pct_cuda_time": 0.6443110852341132, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", 
"cpu_time_us": 2662.102, "cuda_time_us": 707.6700000000001, "pct_cuda_time": 3.0518364558590734, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.03, "cuda_time_us": 9.952, "pct_cuda_time": 0.04291813473611923, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.952, "pct_cuda_time": 0.04291813473611923, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1936.555, "cuda_time_us": 170.078, "pct_cuda_time": 0.7334636776175328, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 145.027, "cuda_time_us": 87.295, "pct_cuda_time": 0.3764608693518417, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.559, "pct_cuda_time": 0.37328685938743417, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 697.172, "cuda_time_us": 12.512, "pct_cuda_time": 0.05395816939492804, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 12.512, "pct_cuda_time": 0.05395816939492804, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 744.333, "cuda_time_us": 23.008, "pct_cuda_time": 0.09922231149604413, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.792, "pct_cuda_time": 0.02497807841555492, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, 
cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.712, "pct_cuda_time": 0.06775821271843904, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.006486020362050174, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 184.687, "cuda_time_us": 47.263, "pct_cuda_time": 0.20382232737471898, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.263, "pct_cuda_time": 0.20382232737471898, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.231, "cuda_time_us": 10.112, "pct_cuda_time": 0.043608136902294786, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.112, "pct_cuda_time": 0.043608136902294786, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 465.939, "cuda_time_us": 517.528, "pct_cuda_time": 2.2318465066031266, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 176.492, "cuda_time_us": 321.723, "pct_cuda_time": 1.387434793178104, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, 
"pct_cuda_time": 0.003169697450868934, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 320.988, "pct_cuda_time": 1.3842650957272349, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.609, "cuda_time_us": 46.207, "pct_cuda_time": 0.19926831307796036, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 46.207, "pct_cuda_time": 0.19926831307796036, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.464, "cuda_time_us": 149.598, "pct_cuda_time": 0.6451434003470624, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.598, "pct_cuda_time": 0.6451434003470624, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2381.744, "cuda_time_us": 708.885, "pct_cuda_time": 3.0570761598084686, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.398, "cuda_time_us": 10.207, "pct_cuda_time": 0.04401782568846152, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.207, "pct_cuda_time": 0.04401782568846152, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1654.683, "cuda_time_us": 169.949, "pct_cuda_time": 0.7329073633710538, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.11, "cuda_time_us": 87.42200000000001, "pct_cuda_time": 0.3770085585712436, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.686, "pct_cuda_time": 0.373834548606836, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": 
{ "name": "Llama3RotaryEmbedding", "cpu_time_us": 460.843, "cuda_time_us": 12.736, "pct_cuda_time": 0.05492417242757382, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 12.736, "pct_cuda_time": 0.05492417242757382, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 722.929, "cuda_time_us": 22.624, "pct_cuda_time": 0.09756630629722281, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.792, "pct_cuda_time": 0.02497807841555492, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.328, "pct_cuda_time": 0.06610220751961772, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.006486020362050174, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.527, "cuda_time_us": 47.167, "pct_cuda_time": 0.20340832607501366, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.167, "pct_cuda_time": 0.20340832607501366, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 113.646, "cuda_time_us": 9.76, "pct_cuda_time": 0.04209013213670857, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.76, "pct_cuda_time": 0.04209013213670857, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 472.231, "cuda_time_us": 518.969, "pct_cuda_time": 2.2380608386122454, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 168.201, "cuda_time_us": 322.94, "pct_cuda_time": 1.3926831221545766, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.204, "pct_cuda_time": 1.389509112190169, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.678, "cuda_time_us": 45.791, "pct_cuda_time": 0.19747430744590388, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.791, "pct_cuda_time": 0.19747430744590388, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.16, "cuda_time_us": 150.238, "pct_cuda_time": 0.6479034090117646, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.238, "pct_cuda_time": 0.6479034090117646, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", 
"cpu_time_us": 2384.809, "cuda_time_us": 708.2779999999999, "pct_cuda_time": 3.05445846409054, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.64, "cuda_time_us": 9.824, "pct_cuda_time": 0.04236613300317879, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.824, "pct_cuda_time": 0.04236613300317879, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1711.564, "cuda_time_us": 170.974, "pct_cuda_time": 0.7373276897481159, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.927, "cuda_time_us": 88.447, "pct_cuda_time": 0.3814288849483057, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0033120103976426417, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 87.679, "pct_cuda_time": 0.378116874550663, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 526.963, "cuda_time_us": 12.8, "pct_cuda_time": 0.05520017329404403, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 12.8, "pct_cuda_time": 0.05520017329404403, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 714.094, "cuda_time_us": 22.496000000000002, "pct_cuda_time": 0.0970143045642824, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.76, "pct_cuda_time": 0.02484007798231981, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, 
cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.456, "pct_cuda_time": 0.06665420925255816, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.005520017329404403, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 175.123, "cuda_time_us": 47.231, "pct_cuda_time": 0.2036843269414839, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.231, "pct_cuda_time": 0.2036843269414839, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.104, "cuda_time_us": 10.112, "pct_cuda_time": 0.043608136902294786, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.112, "pct_cuda_time": 0.043608136902294786, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 457.756, "cuda_time_us": 517.3679999999999, "pct_cuda_time": 2.231156504436951, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.79, "cuda_time_us": 322.811, "pct_cuda_time": 1.3921268079080973, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, 
"pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.075, "pct_cuda_time": 1.3889527979436898, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.623, "cuda_time_us": 45.375, "pct_cuda_time": 0.19568030181384746, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.375, "pct_cuda_time": 0.19568030181384746, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 152.854, "cuda_time_us": 149.182, "pct_cuda_time": 0.643349394715006, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.182, "pct_cuda_time": 0.643349394715006, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2375.021, "cuda_time_us": 707.9580000000001, "pct_cuda_time": 3.0530784597581895, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.198, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1634.589, "cuda_time_us": 170.237, "pct_cuda_time": 0.7341493672701698, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 160.084, "cuda_time_us": 86.975, "pct_cuda_time": 0.37508086501949056, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0033120103976426417, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.207, "pct_cuda_time": 0.3717688546218479, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { 
"entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 469.186, "cuda_time_us": 13.28, "pct_cuda_time": 0.05727017979257067, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.28, "pct_cuda_time": 0.05727017979257067, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 686.892, "cuda_time_us": 22.687, "pct_cuda_time": 0.09783799465015446, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.792, "pct_cuda_time": 0.02497807841555492, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.583, "pct_cuda_time": 0.06720189847196001, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.005658017762639513, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 168.665, "cuda_time_us": 47.295, "pct_cuda_time": 0.2039603278079541, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.295, "pct_cuda_time": 0.2039603278079541, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.396, "cuda_time_us": 10.015, "pct_cuda_time": 0.04318982308905086, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.015, "pct_cuda_time": 0.04318982308905086, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 535.917, "cuda_time_us": 517.69, "pct_cuda_time": 2.2325451337963798, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.785, "cuda_time_us": 323.03700000000003, "pct_cuda_time": 1.3931014359678207, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.003178322477946129, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.3, "pct_cuda_time": 1.3899231134898744, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 113.046, "cuda_time_us": 45.247, "pct_cuda_time": 0.19512830008090706, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.247, "pct_cuda_time": 0.19512830008090706, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 218.032, "cuda_time_us": 149.406, "pct_cuda_time": 0.6443153977476518, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.406, "pct_cuda_time": 0.6443153977476518, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", 
"cpu_time_us": 2384.079, "cuda_time_us": 707.034, "pct_cuda_time": 3.0490936972485256, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.706, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1718.637, "cuda_time_us": 169.88799999999998, "pct_cuda_time": 0.7326443000451993, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 156.504, "cuda_time_us": 87.263, "pct_cuda_time": 0.37632286891860656, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.527, "pct_cuda_time": 0.3731488589541991, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 504.67, "cuda_time_us": 12.8, "pct_cuda_time": 0.05520017329404403, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 12.8, "pct_cuda_time": 0.05520017329404403, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 739.582, "cuda_time_us": 22.720999999999997, "pct_cuda_time": 0.09798462011046674, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.728, "pct_cuda_time": 0.024702077549084704, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, 
cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.52, "pct_cuda_time": 0.06693021011902839, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.473, "pct_cuda_time": 0.006352332442353661, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 178.287, "cuda_time_us": 47.104, "pct_cuda_time": 0.20313663772208201, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.104, "pct_cuda_time": 0.20313663772208201, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.632, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 447.507, "cuda_time_us": 517.114, "pct_cuda_time": 2.230061125998147, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.496, "cuda_time_us": 322.108, "pct_cuda_time": 1.3890951108904637, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 
0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 321.372, "pct_cuda_time": 1.3859211009260561, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.051, "cuda_time_us": 45.184, "pct_cuda_time": 0.19485661172797542, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.184, "pct_cuda_time": 0.19485661172797542, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.78, "cuda_time_us": 149.822, "pct_cuda_time": 0.6461094033797082, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.822, "pct_cuda_time": 0.6461094033797082, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2362.982, "cuda_time_us": 706.677, "pct_cuda_time": 3.0475541299152464, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.715, "cuda_time_us": 9.951, "pct_cuda_time": 0.04291382222258064, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.951, "pct_cuda_time": 0.04291382222258064, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1701.54, "cuda_time_us": 169.62900000000002, "pct_cuda_time": 0.7315273590387028, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.04, "cuda_time_us": 86.71900000000001, "pct_cuda_time": 0.37397686155360976, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.983, "pct_cuda_time": 0.3708028515892022, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { 
"name": "Llama3RotaryEmbedding", "cpu_time_us": 461.207, "cuda_time_us": 13.024, "pct_cuda_time": 0.0561661763266898, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.024, "pct_cuda_time": 0.0561661763266898, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 787.06, "cuda_time_us": 22.624, "pct_cuda_time": 0.09756630629722281, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.76, "pct_cuda_time": 0.02484007798231981, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.424, "pct_cuda_time": 0.06651620881932306, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.006210019495579953, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 178.315, "cuda_time_us": 47.262, "pct_cuda_time": 0.2038180148611804, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.262, "pct_cuda_time": 0.2038180148611804, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.399, "cuda_time_us": 9.663, "pct_cuda_time": 0.04167181832346465, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.663, "pct_cuda_time": 0.04167181832346465, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 446.051, "cuda_time_us": 517.434, "pct_cuda_time": 2.231441130330498, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 153.72, "cuda_time_us": 322.68399999999997, "pct_cuda_time": 1.3915791186886954, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 321.948, "pct_cuda_time": 1.388405108724288, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 104.578, "cuda_time_us": 45.728, "pct_cuda_time": 0.1972026190929723, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.728, "pct_cuda_time": 0.1972026190929723, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.205, "cuda_time_us": 149.022, "pct_cuda_time": 0.6426593925488303, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.022, "pct_cuda_time": 0.6426593925488303, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", 
"cpu_time_us": 2422.099, "cuda_time_us": 708.023, "pct_cuda_time": 3.053358773138198, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 65.596, "cuda_time_us": 10.111, "pct_cuda_time": 0.043603824388756186, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.111, "pct_cuda_time": 0.043603824388756186, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1752.012, "cuda_time_us": 170.206, "pct_cuda_time": 0.7340156793504732, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.976, "cuda_time_us": 86.815, "pct_cuda_time": 0.374390862853315, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.079, "pct_cuda_time": 0.37121685288890743, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 483.206, "cuda_time_us": 13.248, "pct_cuda_time": 0.05713217935933557, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.248, "pct_cuda_time": 0.05713217935933557, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 706.164, "cuda_time_us": 23.008, "pct_cuda_time": 0.09922231149604413, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.76, "pct_cuda_time": 0.02484007798231981, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, 
true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.808, "pct_cuda_time": 0.06817221401814438, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.006210019495579953, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 187.536, "cuda_time_us": 47.135, "pct_cuda_time": 0.20327032564177852, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.135, "pct_cuda_time": 0.20327032564177852, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.934, "cuda_time_us": 10.048, "pct_cuda_time": 0.043332136035824566, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.048, "pct_cuda_time": 0.043332136035824566, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 453.989, "cuda_time_us": 517.658, "pct_cuda_time": 2.2324071333631443, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.914, "cuda_time_us": 322.812, "pct_cuda_time": 1.3921311204216362, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, 
"trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.076, "pct_cuda_time": 1.3889571104572287, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.874, "cuda_time_us": 45.567, "pct_cuda_time": 0.19650830441325814, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.567, "pct_cuda_time": 0.19650830441325814, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.738, "cuda_time_us": 149.279, "pct_cuda_time": 0.6437677085282499, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.279, "pct_cuda_time": 0.6437677085282499, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2680.91, "cuda_time_us": 708.4069999999999, "pct_cuda_time": 3.055014778337019, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.453, "cuda_time_us": 10.144, "pct_cuda_time": 0.04374613733552989, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.144, "pct_cuda_time": 0.04374613733552989, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1999.128, "cuda_time_us": 169.566, "pct_cuda_time": 0.7312556706857711, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 145.758, "cuda_time_us": 86.559, "pct_cuda_time": 0.37328685938743417, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.823, "pct_cuda_time": 0.3701128494230266, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", 
"cpu_time_us": 569.144, "cuda_time_us": 13.024, "pct_cuda_time": 0.0561661763266898, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.024, "pct_cuda_time": 0.0561661763266898, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 958.955, "cuda_time_us": 22.719, "pct_cuda_time": 0.09797599508338956, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.76, "pct_cuda_time": 0.02484007798231981, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.647, "pct_cuda_time": 0.06747789933843024, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.005658017762639513, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], 
bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 184.986, "cuda_time_us": 47.264, "pct_cuda_time": 0.20382663988825758, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.264, "pct_cuda_time": 0.20382663988825758, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.11, "cuda_time_us": 9.984, "pct_cuda_time": 0.04305613516935434, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.984, "pct_cuda_time": 0.04305613516935434, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 459.839, "cuda_time_us": 518.713, "pct_cuda_time": 2.236956835146364, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.386, "cuda_time_us": 323.324, "pct_cuda_time": 1.3943391273533978, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.588, "pct_cuda_time": 1.3911651173889903, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.746, "cuda_time_us": 45.151, "pct_cuda_time": 0.19471429878120172, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.151, "pct_cuda_time": 0.19471429878120172, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.548, "cuda_time_us": 150.238, "pct_cuda_time": 0.6479034090117646, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.238, "pct_cuda_time": 0.6479034090117646, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2468.975, "cuda_time_us": 707.223, "pct_cuda_time": 
3.04990876230732, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.413, "cuda_time_us": 10.048, "pct_cuda_time": 0.043332136035824566, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.048, "pct_cuda_time": 0.043332136035824566, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1774.15, "cuda_time_us": 170.333, "pct_cuda_time": 0.7345633685698751, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.637, "cuda_time_us": 87.006, "pct_cuda_time": 0.3752145529391871, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.003169697450868934, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.271, "pct_cuda_time": 0.37204485548831817, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 520.549, "cuda_time_us": 13.088, "pct_cuda_time": 0.05644217719316002, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.088, "pct_cuda_time": 0.05644217719316002, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 742.584, "cuda_time_us": 22.88, "pct_cuda_time": 0.09867030976310369, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.792, "pct_cuda_time": 0.02497807841555492, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 
256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.583, "pct_cuda_time": 0.06720189847196001, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.505, "pct_cuda_time": 0.0064903328755887705, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 219.299, "cuda_time_us": 47.359, "pct_cuda_time": 0.20423632867442432, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.359, "pct_cuda_time": 0.20423632867442432, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 91.499, "cuda_time_us": 10.048, "pct_cuda_time": 0.043332136035824566, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.048, "pct_cuda_time": 0.043332136035824566, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 467.807, "cuda_time_us": 516.794, "pct_cuda_time": 2.228681121665796, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 169.463, "cuda_time_us": 322.46, "pct_cuda_time": 1.3906131156560497, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- 
matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 321.724, "pct_cuda_time": 1.3874391056916422, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.467, "cuda_time_us": 45.024, "pct_cuda_time": 0.19416660956179987, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.024, "pct_cuda_time": 0.19416660956179987, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.251, "cuda_time_us": 149.31, "pct_cuda_time": 0.6439013964479464, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.31, "pct_cuda_time": 0.6439013964479464, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2401.015, "cuda_time_us": 706.1960000000001, "pct_cuda_time": 3.0454798109031818, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.904, "cuda_time_us": 10.047, "pct_cuda_time": 0.043327823522285966, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.047, "pct_cuda_time": 0.043327823522285966, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1714.162, "cuda_time_us": 170.04399999999998, "pct_cuda_time": 0.7333170521572204, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.249, "cuda_time_us": 87.007, "pct_cuda_time": 0.3752188654527257, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.271, "pct_cuda_time": 0.37204485548831817, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 512.62, "cuda_time_us": 13.183, 
"pct_cuda_time": 0.05685186597932675, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.183, "pct_cuda_time": 0.05685186597932675, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 730.122, "cuda_time_us": 22.719, "pct_cuda_time": 0.09797599508338956, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.759, "pct_cuda_time": 0.024835765468781214, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.488, "pct_cuda_time": 0.06679220968579327, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.006348019928815063, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], 
bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 174.865, "cuda_time_us": 47.135, "pct_cuda_time": 0.20327032564177852, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.135, "pct_cuda_time": 0.20327032564177852, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.445, "cuda_time_us": 9.888, "pct_cuda_time": 0.04264213386964901, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.888, "pct_cuda_time": 0.04264213386964901, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 470.55, "cuda_time_us": 516.2170000000001, "pct_cuda_time": 2.226192801354026, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.859, "cuda_time_us": 322.396, "pct_cuda_time": 1.3903371147895798, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 321.66, "pct_cuda_time": 1.387163104825172, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 117.993, "cuda_time_us": 44.927, "pct_cuda_time": 0.19374829574855595, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 44.927, "pct_cuda_time": 0.19374829574855595, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.46, "cuda_time_us": 148.894, "pct_cuda_time": 0.64210739081589, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 148.894, "pct_cuda_time": 0.64210739081589, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2419.625, "cuda_time_us": 708.344, "pct_cuda_time": 3.054743089984088, "trace": "" }, "children": [ 
{ "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.423, "cuda_time_us": 9.856, "pct_cuda_time": 0.0425041334364139, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.856, "pct_cuda_time": 0.0425041334364139, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1726.859, "cuda_time_us": 169.95000000000002, "pct_cuda_time": 0.7329116758845925, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 145.416, "cuda_time_us": 87.35900000000001, "pct_cuda_time": 0.3767368702183119, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.623, "pct_cuda_time": 0.3735628602539044, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 510.797, "cuda_time_us": 12.864, "pct_cuda_time": 0.05547617416051425, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 12.864, "pct_cuda_time": 0.05547617416051425, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 727.32, "cuda_time_us": 22.399, "pct_cuda_time": 0.09659599075103846, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.696, "pct_cuda_time": 0.024564077115849594, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> 
> > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.327, "pct_cuda_time": 0.06609789500607913, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.376, "pct_cuda_time": 0.005934018629109732, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 194.689, "cuda_time_us": 47.328, "pct_cuda_time": 0.2041026407547278, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.328, "pct_cuda_time": 0.2041026407547278, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.358, "cuda_time_us": 10.081, "pct_cuda_time": 0.04347444898259827, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.081, "pct_cuda_time": 0.04347444898259827, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 475.577, "cuda_time_us": 518.457, "pct_cuda_time": 2.235852831680483, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 174.805, "cuda_time_us": 323.355, "pct_cuda_time": 1.3944728152730945, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], 
bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.619, "pct_cuda_time": 1.391298805308687, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.409, "cuda_time_us": 45.408, "pct_cuda_time": 0.19582261476062118, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.408, "pct_cuda_time": 0.19582261476062118, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.991, "cuda_time_us": 149.694, "pct_cuda_time": 0.6455574016467677, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.694, "pct_cuda_time": 0.6455574016467677, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2370.301, "cuda_time_us": 709.8800000000001, "pct_cuda_time": 3.0613671107793734, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.364, "cuda_time_us": 10.912, "pct_cuda_time": 0.04705814773317254, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.912, "pct_cuda_time": 0.04705814773317254, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1669.657, "cuda_time_us": 170.335, "pct_cuda_time": 0.7345719935969524, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 153.652, "cuda_time_us": 87.552, "pct_cuda_time": 0.37756918533126116, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.816, "pct_cuda_time": 0.3743951753668537, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 482.368, "cuda_time_us": 13.152, "pct_cuda_time": 0.05671817805963024, 
"trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.152, "pct_cuda_time": 0.05671817805963024, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 718.82, "cuda_time_us": 22.464000000000002, "pct_cuda_time": 0.09687630413104728, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.696, "pct_cuda_time": 0.024564077115849594, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.456, "pct_cuda_time": 0.06665420925255816, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.005658017762639513, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], 
None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 174.448, "cuda_time_us": 47.167, "pct_cuda_time": 0.20340832607501366, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.167, "pct_cuda_time": 0.20340832607501366, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.156, "cuda_time_us": 9.856, "pct_cuda_time": 0.0425041334364139, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.856, "pct_cuda_time": 0.0425041334364139, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 479.805, "cuda_time_us": 518.777, "pct_cuda_time": 2.2372328360128346, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.484, "cuda_time_us": 323.03499999999997, "pct_cuda_time": 1.3930928109407432, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.299, "pct_cuda_time": 1.3899188009763357, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.92, "cuda_time_us": 45.728, "pct_cuda_time": 0.1972026190929723, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.728, "pct_cuda_time": 0.1972026190929723, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 174.154, "cuda_time_us": 150.014, "pct_cuda_time": 0.6469374059791189, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.014, "pct_cuda_time": 0.6469374059791189, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2339.818, "cuda_time_us": 708.951, "pct_cuda_time": 3.0573607857020164, "trace": "" }, "children": [ { "entry": { "name": 
"RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.587, "cuda_time_us": 10.048, "pct_cuda_time": 0.043332136035824566, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.048, "pct_cuda_time": 0.043332136035824566, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1656.121, "cuda_time_us": 170.748, "pct_cuda_time": 0.736353061688393, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.408, "cuda_time_us": 87.23100000000001, "pct_cuda_time": 0.37618486848537147, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.495, "pct_cuda_time": 0.37301085852096394, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 471.435, "cuda_time_us": 13.055, "pct_cuda_time": 0.05629986424638631, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.055, "pct_cuda_time": 0.05629986424638631, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 718.575, "cuda_time_us": 22.911, "pct_cuda_time": 0.09880399768280022, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.888, "pct_cuda_time": 0.025392079715260252, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > 
>(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.52, "pct_cuda_time": 0.06693021011902839, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.503, "pct_cuda_time": 0.006481707848511576, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 173.012, "cuda_time_us": 47.551, "pct_cuda_time": 0.205064331273835, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.551, "pct_cuda_time": 0.205064331273835, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.81, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 450.079, "cuda_time_us": 518.139, "pct_cuda_time": 2.2344814523752095, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 155.61, "cuda_time_us": 323.164, "pct_cuda_time": 1.393649125187222, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) 
<- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.428, "pct_cuda_time": 1.3904751152228148, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 107.855, "cuda_time_us": 45.696, "pct_cuda_time": 0.19706461865973715, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.696, "pct_cuda_time": 0.19706461865973715, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.694, "cuda_time_us": 149.279, "pct_cuda_time": 0.6437677085282499, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.279, "pct_cuda_time": 0.6437677085282499, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2363.281, "cuda_time_us": 709.75, "pct_cuda_time": 3.0608064840193556, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.097, "cuda_time_us": 9.728, "pct_cuda_time": 0.041952131703473464, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.728, "pct_cuda_time": 0.041952131703473464, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1677.357, "cuda_time_us": 170.686, "pct_cuda_time": 0.7360856858489999, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 156.075, "cuda_time_us": 87.232, "pct_cuda_time": 0.37618918099891, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.496, "pct_cuda_time": 0.3730151710345025, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 454.856, "cuda_time_us": 13.28, "pct_cuda_time": 0.05727017979257067, "trace": "" }, "children": [ { "entry": { 
"name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.28, "pct_cuda_time": 0.05727017979257067, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 715.876, "cuda_time_us": 22.783, "pct_cuda_time": 0.09825199594985978, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.76, "pct_cuda_time": 0.02484007798231981, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.583, "pct_cuda_time": 0.06720189847196001, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.006210019495579953, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": 
"RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 190.72, "cuda_time_us": 47.391, "pct_cuda_time": 0.2043743291076594, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.391, "pct_cuda_time": 0.2043743291076594, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.926, "cuda_time_us": 9.984, "pct_cuda_time": 0.04305613516935434, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.984, "pct_cuda_time": 0.04305613516935434, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 471.917, "cuda_time_us": 519.352, "pct_cuda_time": 2.2397125312975277, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 179.445, "cuda_time_us": 324.218, "pct_cuda_time": 1.398194514456904, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.003169697450868934, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 323.483, "pct_cuda_time": 1.3950248170060349, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.285, "cuda_time_us": 45.407, "pct_cuda_time": 0.19581830224708255, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.407, "pct_cuda_time": 0.19581830224708255, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.509, "cuda_time_us": 149.727, "pct_cuda_time": 0.6456997145935415, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.727, "pct_cuda_time": 0.6456997145935415, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2693.815, "cuda_time_us": 706.808, "pct_cuda_time": 3.0481190691888025, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.349, 
"cuda_time_us": 9.952, "pct_cuda_time": 0.04291813473611923, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.952, "pct_cuda_time": 0.04291813473611923, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2016.569, "cuda_time_us": 170.526, "pct_cuda_time": 0.7353956836828245, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.144, "cuda_time_us": 87.71, "pct_cuda_time": 0.3782505624703595, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.003169697450868934, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.975, "pct_cuda_time": 0.37508086501949056, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 507.227, "cuda_time_us": 13.152, "pct_cuda_time": 0.05671817805963024, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.152, "pct_cuda_time": 0.05671817805963024, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1040.171, "cuda_time_us": 22.625, "pct_cuda_time": 0.09757061881076143, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.921, "pct_cuda_time": 0.02553439266203396, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, 
cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.424, "pct_cuda_time": 0.06651620881932306, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.005520017329404403, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.719, "cuda_time_us": 47.039, "pct_cuda_time": 0.2028563243420732, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.039, "pct_cuda_time": 0.2028563243420732, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.404, "cuda_time_us": 9.76, "pct_cuda_time": 0.04209013213670857, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.76, "pct_cuda_time": 0.04209013213670857, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 452.035, "cuda_time_us": 516.5699999999999, "pct_cuda_time": 2.2277151186331503, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.696, "cuda_time_us": 322.108, "pct_cuda_time": 1.3890951108904637, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, 
"children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 321.372, "pct_cuda_time": 1.3859211009260561, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.882, "cuda_time_us": 45.056, "pct_cuda_time": 0.194304609995035, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.056, "pct_cuda_time": 0.194304609995035, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.586, "cuda_time_us": 149.406, "pct_cuda_time": 0.6443153977476518, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.406, "pct_cuda_time": 0.6443153977476518, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2360.862, "cuda_time_us": 708.5980000000001, "pct_cuda_time": 3.0558384684228916, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.422, "cuda_time_us": 9.92, "pct_cuda_time": 0.042780134302884125, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.92, "pct_cuda_time": 0.042780134302884125, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1705.928, "cuda_time_us": 169.981, "pct_cuda_time": 0.7330453638042889, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.561, "cuda_time_us": 86.879, "pct_cuda_time": 0.3746668637197853, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.143, "pct_cuda_time": 0.3714928537553777, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 502.169, "cuda_time_us": 13.279, "pct_cuda_time": 0.05726586727903208, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, 
c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.279, "pct_cuda_time": 0.05726586727903208, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 707.761, "cuda_time_us": 22.687, "pct_cuda_time": 0.09783799465015446, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.696, "pct_cuda_time": 0.024564077115849594, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.679, "pct_cuda_time": 0.06761589977166534, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.005658017762639513, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", 
"cpu_time_us": 180.237, "cuda_time_us": 47.136, "pct_cuda_time": 0.20327463815531716, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.136, "pct_cuda_time": 0.20327463815531716, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.637, "cuda_time_us": 9.759, "pct_cuda_time": 0.04208581962316998, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.759, "pct_cuda_time": 0.04208581962316998, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 434.746, "cuda_time_us": 518.9380000000001, "pct_cuda_time": 2.237927150692549, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.908, "cuda_time_us": 322.845, "pct_cuda_time": 1.39227343336841, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.003178322477946129, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.108, "pct_cuda_time": 1.3890951108904637, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.907, "cuda_time_us": 45.951, "pct_cuda_time": 0.19816430961207948, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.951, "pct_cuda_time": 0.19816430961207948, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 134.22, "cuda_time_us": 150.142, "pct_cuda_time": 0.6474894077120592, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.142, "pct_cuda_time": 0.6474894077120592, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2314.442, "cuda_time_us": 708.856, "pct_cuda_time": 3.0569510969158493, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.429, "cuda_time_us": 10.016, "pct_cuda_time": 
0.04319413560258945, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1619.379, "cuda_time_us": 169.75900000000001, "pct_cuda_time": 0.7320879857987205, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 164.019, "cuda_time_us": 86.495, "pct_cuda_time": 0.37301085852096394, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.759, "pct_cuda_time": 0.3698368485565564, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 454.543, "cuda_time_us": 13.024, "pct_cuda_time": 0.0561661763266898, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.024, "pct_cuda_time": 0.0561661763266898, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 705.155, "cuda_time_us": 22.752, "pct_cuda_time": 0.09811830803016326, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.728, "pct_cuda_time": 0.024702077549084704, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, 
float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.52, "pct_cuda_time": 0.06693021011902839, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.006486020362050174, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 166.283, "cuda_time_us": 47.488, "pct_cuda_time": 0.20479264292090335, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.488, "pct_cuda_time": 0.20479264292090335, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.486, "cuda_time_us": 10.144, "pct_cuda_time": 0.04374613733552989, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.144, "pct_cuda_time": 0.04374613733552989, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 471.032, "cuda_time_us": 518.937, "pct_cuda_time": 2.23792283817901, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 178.289, "cuda_time_us": 323.387, "pct_cuda_time": 1.3946108157063293, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.651, "pct_cuda_time": 1.3914368057419217, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.983, "cuda_time_us": 45.824, "pct_cuda_time": 0.19761662039267763, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.824, "pct_cuda_time": 0.19761662039267763, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.677, "cuda_time_us": 149.726, "pct_cuda_time": 0.6456954020800029, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.726, "pct_cuda_time": 0.6456954020800029, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2380.018, "cuda_time_us": 708.4399999999999, "pct_cuda_time": 3.055157091283793, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.005, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1720.728, "cuda_time_us": 170.752, "pct_cuda_time": 0.7363703117425474, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 146.271, "cuda_time_us": 87.295, "pct_cuda_time": 0.3764608693518417, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.559, "pct_cuda_time": 0.37328685938743417, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 513.336, "cuda_time_us": 13.12, "pct_cuda_time": 0.056580177626395126, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, 
c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.12, "pct_cuda_time": 0.056580177626395126, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 745.679, "cuda_time_us": 22.849, "pct_cuda_time": 0.0985366218434072, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.632, "pct_cuda_time": 0.024288076249379374, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.712, "pct_cuda_time": 0.06775821271843904, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.505, "pct_cuda_time": 0.0064903328755887705, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 172.813, "cuda_time_us": 
47.488, "pct_cuda_time": 0.20479264292090335, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.488, "pct_cuda_time": 0.20479264292090335, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.938, "cuda_time_us": 9.824, "pct_cuda_time": 0.04236613300317879, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.824, "pct_cuda_time": 0.04236613300317879, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 447.006, "cuda_time_us": 517.848, "pct_cuda_time": 2.233226510935477, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.948, "cuda_time_us": 322.683, "pct_cuda_time": 1.391574806175157, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0033120103976426417, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 321.915, "pct_cuda_time": 1.3882627957775144, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.964, "cuda_time_us": 45.247, "pct_cuda_time": 0.19512830008090706, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.247, "pct_cuda_time": 0.19512830008090706, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.392, "cuda_time_us": 149.918, "pct_cuda_time": 0.6465234046794136, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.918, "pct_cuda_time": 0.6465234046794136, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2349.799, "cuda_time_us": 709.0459999999999, "pct_cuda_time": 3.0577704744881826, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.078, "cuda_time_us": 9.664, "pct_cuda_time": 0.041676130837003236, "trace": "" }, 
"children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.664, "pct_cuda_time": 0.041676130837003236, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1697.614, "cuda_time_us": 170.334, "pct_cuda_time": 0.7345676810834137, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 156.348, "cuda_time_us": 87.99900000000001, "pct_cuda_time": 0.37949687888301414, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 87.263, "pct_cuda_time": 0.37632286891860656, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 491.126, "cuda_time_us": 12.768, "pct_cuda_time": 0.05506217286080892, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 12.768, "pct_cuda_time": 0.05506217286080892, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 739.977, "cuda_time_us": 22.4, "pct_cuda_time": 0.09660030326457704, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.664, "pct_cuda_time": 0.024426076682614484, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, 
false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.456, "pct_cuda_time": 0.06665420925255816, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.005520017329404403, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 172.382, "cuda_time_us": 47.167, "pct_cuda_time": 0.20340832607501366, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.167, "pct_cuda_time": 0.20340832607501366, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.054, "cuda_time_us": 9.888, "pct_cuda_time": 0.04264213386964901, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.888, "pct_cuda_time": 0.04264213386964901, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 437.523, "cuda_time_us": 519.16, "pct_cuda_time": 2.238884528698117, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 155.836, "cuda_time_us": 323.54699999999997, "pct_cuda_time": 1.3953008178725048, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.811, "pct_cuda_time": 1.3921268079080973, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.677, "cuda_time_us": 45.727, "pct_cuda_time": 0.19719830657943366, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.727, "pct_cuda_time": 0.19719830657943366, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 135.614, "cuda_time_us": 149.886, "pct_cuda_time": 0.6463854042461784, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.886, "pct_cuda_time": 0.6463854042461784, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2377.52, "cuda_time_us": 708.8199999999999, "pct_cuda_time": 3.05679584642846, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 65.138, "cuda_time_us": 9.888, "pct_cuda_time": 0.04264213386964901, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.888, "pct_cuda_time": 0.04264213386964901, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1693.328, "cuda_time_us": 169.373, "pct_cuda_time": 0.7304233555728218, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.668, "cuda_time_us": 85.983, "pct_cuda_time": 0.3708028515892022, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0033120103976426417, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.215, "pct_cuda_time": 0.36749084119155956, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 482.07, "cuda_time_us": 13.6, "pct_cuda_time": 0.058650184124921775, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, 
c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.6, "pct_cuda_time": 0.058650184124921775, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 737.671, "cuda_time_us": 22.751, "pct_cuda_time": 0.09811399551662467, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.888, "pct_cuda_time": 0.025392079715260252, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.583, "pct_cuda_time": 0.06720189847196001, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.005520017329404403, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 189.514, "cuda_time_us": 
47.039, "pct_cuda_time": 0.2028563243420732, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.039, "pct_cuda_time": 0.2028563243420732, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.162, "cuda_time_us": 9.855, "pct_cuda_time": 0.04249982092287531, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.855, "pct_cuda_time": 0.04249982092287531, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 466.367, "cuda_time_us": 519.704, "pct_cuda_time": 2.2412305360631137, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.627, "cuda_time_us": 323.323, "pct_cuda_time": 1.394334814839859, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.587, "pct_cuda_time": 1.3911608048754516, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 108.708, "cuda_time_us": 46.047, "pct_cuda_time": 0.19857831091178477, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 46.047, "pct_cuda_time": 0.19857831091178477, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 141.288, "cuda_time_us": 150.334, "pct_cuda_time": 0.6483174103114699, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 150.334, "pct_cuda_time": 0.6483174103114699, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2435.96, "cuda_time_us": 707.894, "pct_cuda_time": 3.052802458891719, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.014, "cuda_time_us": 10.048, "pct_cuda_time": 0.043332136035824566, "trace": "" }, "children": [ { 
"entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.048, "pct_cuda_time": 0.043332136035824566, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1773.746, "cuda_time_us": 169.982, "pct_cuda_time": 0.7330496763178276, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 186.083, "cuda_time_us": 87.262, "pct_cuda_time": 0.376318556405068, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.003169697450868934, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.527, "pct_cuda_time": 0.3731488589541991, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 520.048, "cuda_time_us": 12.832, "pct_cuda_time": 0.055338173727279144, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 12.832, "pct_cuda_time": 0.055338173727279144, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 735.345, "cuda_time_us": 22.625, "pct_cuda_time": 0.09757061881076143, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.728, "pct_cuda_time": 0.024702077549084704, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, 
false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.424, "pct_cuda_time": 0.06651620881932306, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.473, "pct_cuda_time": 0.006352332442353661, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 185.195, "cuda_time_us": 47.263, "pct_cuda_time": 0.20382232737471898, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.263, "pct_cuda_time": 0.20382232737471898, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.783, "cuda_time_us": 10.208, "pct_cuda_time": 0.04402213820200011, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.208, "pct_cuda_time": 0.04402213820200011, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 445.227, "cuda_time_us": 517.656, "pct_cuda_time": 2.2323985083360665, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.411, "cuda_time_us": 322.811, "pct_cuda_time": 1.3921268079080973, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.075, "pct_cuda_time": 1.3889527979436898, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.484, "cuda_time_us": 45.12, "pct_cuda_time": 0.1945806108615052, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.12, "pct_cuda_time": 0.1945806108615052, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.191, "cuda_time_us": 149.725, "pct_cuda_time": 0.6456910895664641, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.725, "pct_cuda_time": 0.6456910895664641, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2561.004, "cuda_time_us": 708.0859999999999, "pct_cuda_time": 3.0536304614911294, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.903, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1889.427, "cuda_time_us": 170.302, "pct_cuda_time": 0.7344296806501786, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 145.605, "cuda_time_us": 86.879, "pct_cuda_time": 0.3746668637197853, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.143, "pct_cuda_time": 0.3714928537553777, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 475.059, "cuda_time_us": 13.28, "pct_cuda_time": 0.05727017979257067, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, 
c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.28, "pct_cuda_time": 0.05727017979257067, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 951.831, "cuda_time_us": 22.752, "pct_cuda_time": 0.09811830803016326, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.888, "pct_cuda_time": 0.025392079715260252, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.424, "pct_cuda_time": 0.06651620881932306, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.006210019495579953, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 182.655, "cuda_time_us": 
47.391, "pct_cuda_time": 0.2043743291076594, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.391, "pct_cuda_time": 0.2043743291076594, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.746, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.016, "pct_cuda_time": 0.04319413560258945, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 454.308, "cuda_time_us": 517.752, "pct_cuda_time": 2.2328125096357723, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.94, "cuda_time_us": 322.93899999999996, "pct_cuda_time": 1.3926788096410376, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.203, "pct_cuda_time": 1.3895047996766303, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.745, "cuda_time_us": 45.599, "pct_cuda_time": 0.19664630484649326, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.599, "pct_cuda_time": 0.19664630484649326, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.83, "cuda_time_us": 149.214, "pct_cuda_time": 0.643487395148241, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.214, "pct_cuda_time": 0.643487395148241, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2479.306, "cuda_time_us": 709.4929999999999, "pct_cuda_time": 3.0596981680399358, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 65.265, "cuda_time_us": 10.175, "pct_cuda_time": 0.043879825255226414, "trace": "" 
}, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.175, "pct_cuda_time": 0.043879825255226414, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1790.39, "cuda_time_us": 170.078, "pct_cuda_time": 0.7334636776175328, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 146.209, "cuda_time_us": 87.58200000000001, "pct_cuda_time": 0.3776985607374191, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.846, "pct_cuda_time": 0.37452455077301156, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 549.961, "cuda_time_us": 12.799, "pct_cuda_time": 0.05519586078050543, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 12.799, "pct_cuda_time": 0.05519586078050543, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 746.014, "cuda_time_us": 22.560999999999996, "pct_cuda_time": 0.09729461794429119, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.728, "pct_cuda_time": 0.024702077549084704, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, 
cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.52, "pct_cuda_time": 0.06693021011902839, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.313, "pct_cuda_time": 0.00566233027617811, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 198.25, "cuda_time_us": 47.136, "pct_cuda_time": 0.20327463815531716, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.136, "pct_cuda_time": 0.20327463815531716, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.05, "cuda_time_us": 9.792, "pct_cuda_time": 0.042228132569943684, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.792, "pct_cuda_time": 0.042228132569943684, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 466.843, "cuda_time_us": 519.448, "pct_cuda_time": 2.240126532597233, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.675, "cuda_time_us": 323.291, "pct_cuda_time": 1.3941968144066241, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.555, "pct_cuda_time": 1.3910228044422166, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.853, "cuda_time_us": 46.335, "pct_cuda_time": 0.19982031481090076, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 46.335, "pct_cuda_time": 0.19982031481090076, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.698, "cuda_time_us": 149.822, "pct_cuda_time": 0.6461094033797082, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.822, "pct_cuda_time": 0.6461094033797082, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2399.01, "cuda_time_us": 708.759, "pct_cuda_time": 3.0565327831026057, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.391, "cuda_time_us": 10.368, "pct_cuda_time": 0.04471214036817567, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.368, "pct_cuda_time": 0.04471214036817567, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1726.997, "cuda_time_us": 170.175, "pct_cuda_time": 0.7338819914307768, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 158.792, "cuda_time_us": 87.007, "pct_cuda_time": 0.3752188654527257, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.271, "pct_cuda_time": 0.37204485548831817, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 487.234, "cuda_time_us": 13.344, "pct_cuda_time": 0.0575461806590409, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 
const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.344, "pct_cuda_time": 0.0575461806590409, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 746.459, "cuda_time_us": 22.592000000000002, "pct_cuda_time": 0.09742830586398772, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.728, "pct_cuda_time": 0.024702077549084704, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.488, "pct_cuda_time": 0.06679220968579327, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.376, "pct_cuda_time": 0.005934018629109732, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 181.819, "cuda_time_us": 
47.232, "pct_cuda_time": 0.2036886394550225, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.232, "pct_cuda_time": 0.2036886394550225, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.562, "cuda_time_us": 10.239, "pct_cuda_time": 0.04415582612169663, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.239, "pct_cuda_time": 0.04415582612169663, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 455.566, "cuda_time_us": 517.977, "pct_cuda_time": 2.2337828251819567, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 161.401, "cuda_time_us": 323.22799999999995, "pct_cuda_time": 1.3939251260536922, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0033120103976426417, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.46, "pct_cuda_time": 1.3906131156560497, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.786, "cuda_time_us": 45.439, "pct_cuda_time": 0.1959563026803177, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.439, "pct_cuda_time": 0.1959563026803177, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 148.494, "cuda_time_us": 149.31, "pct_cuda_time": 0.6439013964479464, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.31, "pct_cuda_time": 0.6439013964479464, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2367.826, "cuda_time_us": 707.4169999999999, "pct_cuda_time": 3.050745389933808, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.726, "cuda_time_us": 9.76, "pct_cuda_time": 0.04209013213670857, "trace": "" }, 
"children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.76, "pct_cuda_time": 0.04209013213670857, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1689.672, "cuda_time_us": 169.567, "pct_cuda_time": 0.7312599831993097, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 146.086, "cuda_time_us": 86.495, "pct_cuda_time": 0.37301085852096394, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 85.759, "pct_cuda_time": 0.3698368485565564, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 460.583, "cuda_time_us": 12.768, "pct_cuda_time": 0.05506217286080892, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 12.768, "pct_cuda_time": 0.05506217286080892, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 772.602, "cuda_time_us": 22.720000000000002, "pct_cuda_time": 0.09798030759692816, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.696, "pct_cuda_time": 0.024564077115849594, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, 
false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.52, "pct_cuda_time": 0.06693021011902839, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.006486020362050174, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 171.866, "cuda_time_us": 47.584, "pct_cuda_time": 0.2052066442206087, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.584, "pct_cuda_time": 0.2052066442206087, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.594, "cuda_time_us": 10.049, "pct_cuda_time": 0.04333644854936316, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.049, "pct_cuda_time": 0.04333644854936316, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 473.159, "cuda_time_us": 518.0409999999999, "pct_cuda_time": 2.234058826048426, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 161.469, "cuda_time_us": 323.196, "pct_cuda_time": 1.3937871256204575, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.704, "pct_cuda_time": 0.0030360095311724217, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.492, "pct_cuda_time": 1.390751116089285, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 114.047, "cuda_time_us": 45.535, "pct_cuda_time": 0.19637030398002303, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.535, "pct_cuda_time": 0.19637030398002303, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.716, "cuda_time_us": 149.31, "pct_cuda_time": 0.6439013964479464, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.31, "pct_cuda_time": 0.6439013964479464, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2361.896, "cuda_time_us": 707.415, "pct_cuda_time": 3.0507367649067305, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.198, "cuda_time_us": 9.76, "pct_cuda_time": 0.04209013213670857, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.76, "pct_cuda_time": 0.04209013213670857, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1701.238, "cuda_time_us": 171.00600000000003, "pct_cuda_time": 0.7374656901813511, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 158.753, "cuda_time_us": 87.74300000000001, "pct_cuda_time": 0.3783928754171333, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 87.007, "pct_cuda_time": 0.3752188654527257, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 494.353, "cuda_time_us": 12.896, "pct_cuda_time": 0.055614174593749365, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, 
c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 12.896, "pct_cuda_time": 0.055614174593749365, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 731.119, "cuda_time_us": 22.816, "pct_cuda_time": 0.09839430889663349, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.76, "pct_cuda_time": 0.02484007798231981, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.488, "pct_cuda_time": 0.06679220968579327, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.568, "pct_cuda_time": 0.006762021228520394, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 175.119, 
"cuda_time_us": 47.551, "pct_cuda_time": 0.205064331273835, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.551, "pct_cuda_time": 0.205064331273835, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.078, "cuda_time_us": 9.664, "pct_cuda_time": 0.041676130837003236, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.664, "pct_cuda_time": 0.041676130837003236, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 444.008, "cuda_time_us": 516.9849999999999, "pct_cuda_time": 2.229504811751668, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.498, "cuda_time_us": 322.811, "pct_cuda_time": 1.3921268079080973, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.075, "pct_cuda_time": 1.3889527979436898, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.383, "cuda_time_us": 45.152, "pct_cuda_time": 0.19471861129474033, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.152, "pct_cuda_time": 0.19471861129474033, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 137.508, "cuda_time_us": 149.022, "pct_cuda_time": 0.6426593925488303, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.022, "pct_cuda_time": 0.6426593925488303, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2357.609, "cuda_time_us": 709.012, "pct_cuda_time": 3.0576238490278707, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.806, "cuda_time_us": 9.695, "pct_cuda_time": 0.04180981875669976, 
"trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 9.695, "pct_cuda_time": 0.04180981875669976, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1651.382, "cuda_time_us": 170.109, "pct_cuda_time": 0.7335973655372294, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 146.556, "cuda_time_us": 87.29400000000001, "pct_cuda_time": 0.37645655683830315, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 86.558, "pct_cuda_time": 0.3732825468738956, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1024, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 470.187, "cuda_time_us": 13.088, "pct_cuda_time": 0.05644217719316002, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 13.088, "pct_cuda_time": 0.05644217719316002, "trace": "_C::rotary_embedding(int64[1024], bfloat16[1024, 4096], bfloat16[1024, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 728.779, "cuda_time_us": 22.527, "pct_cuda_time": 0.0971479924839789, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 5.888, "pct_cuda_time": 0.025392079715260252, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1024], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, 
cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 15.359, "pct_cuda_time": 0.06623589543931424, "trace": "_vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.005520017329404403, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], None, None, bfloat16[1024, 32, 128], int32[9], int32[9], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1024, 32, 128], bfloat16[1024, 8, 128], bfloat16[1024, 8, 128], bfloat16[1024, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 172.7, "cuda_time_us": 47.2, "pct_cuda_time": 0.20355063902178736, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x256x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 47.2, "pct_cuda_time": 0.20355063902178736, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1024, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.359, "cuda_time_us": 10.175, "pct_cuda_time": 0.043879825255226414, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.175, "pct_cuda_time": 0.043879825255226414, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 480.394, "cuda_time_us": 519.0329999999999, "pct_cuda_time": 2.238336839478715, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 154.218, "cuda_time_us": 323.51599999999996, "pct_cuda_time": 1.3951671299528083, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 322.78, "pct_cuda_time": 1.3919931199884008, "trace": "mm(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1024, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1024, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 135.192, "cuda_time_us": 45.919, "pct_cuda_time": 0.19802630917884434, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 45.919, "pct_cuda_time": 0.19802630917884434, "trace": "_C::silu_and_mul(bfloat16[1024, 14336], bfloat16[1024, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.787, "cuda_time_us": 149.598, "pct_cuda_time": 0.6451434003470624, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 149.598, "pct_cuda_time": 0.6451434003470624, "trace": "mm(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1024, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1024, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.851, "cuda_time_us": 10.208, "pct_cuda_time": 0.04402213820200011, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 10.208, "pct_cuda_time": 0.04402213820200011, "trace": "_C::fused_add_rms_norm(bfloat16[1024, 4096], bfloat16[1024, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 442.803, "cuda_time_us": 363.13100000000003, "pct_cuda_time": 1.5660073537843364, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 5.6, "pct_cuda_time": 0.02415007581614426, "trace": "index_select(bfloat16[1024, 4096], 0, int64[8])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.0031740099644075315, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[8, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 356.795, "pct_cuda_time": 1.5386832680037843, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[8, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 3666.118, "cuda_time_us": 120.00000000000001, "pct_cuda_time": 0.5175016246316628, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, 
"cuda_time_us": 0.704, "pct_cuda_time": 0.0030360095311724217, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.003169697450868934, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.003450010830877752, "trace": "copy_(int32[8], int32[8], True) <- _to_copy(int32[8], 3, 0, None, None, True, None) <- to(int32[8], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0033120103976426417, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.801, "pct_cuda_time": 0.003454323344416349, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.003450010830877752, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.003450010830877752, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 4.736, "pct_cuda_time": 0.02042406411879629, "trace": "copy_(float32[8, 128256], bfloat16[8, 128256], False) <- _to_copy(bfloat16[8, 128256], 6, None, None, None, False, None) <- to(bfloat16[8, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 6.432, "pct_cuda_time": 0.027738087080257125, "trace": "div_(float32[8, 128256], bfloat16[8, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, 
at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 35.2, "pct_cuda_time": 0.15180047655862108, "trace": "_softmax(float32[8, 128256], -1, False) <- softmax(float32[8, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 28.128, "pct_cuda_time": 0.12130238081366175, "trace": "_log_softmax(float32[8, 128256], -1, False) <- log_softmax(float32[8, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 1.92, "pct_cuda_time": 0.008280025994106604, "trace": "copy_(int64[8], int32[8], False) <- _to_copy(int32[8], 4, None, None, None, False, None) <- to(int32[8], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 7.296, "pct_cuda_time": 0.0314640987776051, "trace": "index(float32[8, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 27.712, "pct_cuda_time": 0.11950837518160531, "trace": "argmax(float32[8, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.013662042890275899, "trace": "copy_(int64[8], int64[8], False) <- _to_copy(int64[8], 4, 0, None, None, False, None) <- to(int64[8], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] }, "decode_1": { "metadata": { "num_running_seqs": 8 }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", "cuda_time_us": 6367.842000000001, "pct_cuda_time": 93.14685167385447, "invocations": 1 }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 7.488, "pct_cuda_time": 0.10953218144134579, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, 
unsigned int, long)", "cuda_time_us": 7.488, "pct_cuda_time": 0.10953218144134579, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 6357.346, "pct_cuda_time": 92.99331938533838, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 199.90099999999998, "pct_cuda_time": 2.924090892402038, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 4.16, "pct_cuda_time": 0.06085121191185877, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 195.74099999999999, "pct_cuda_time": 2.8632396804901794, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 1850.8499999999997, "pct_cuda_time": 27.073669607467256, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 678.7440000000001, "pct_cuda_time": 9.928460331226605, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 678.7440000000001, "pct_cuda_time": 9.928460331226605, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 119.93200000000004, "pct_cuda_time": 1.754328737262752, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 119.93200000000004, "pct_cuda_time": 1.754328737262752, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 482.93399999999997, "pct_cuda_time": 7.064211339769616, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 81.15, "pct_cuda_time": 1.1870374631363798, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 359.7039999999999, "pct_cuda_time": 5.26164046383251, "invocations": 32 }, "children": [] 
}, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 42.080000000000005, "pct_cuda_time": 0.6155334128007254, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 569.2400000000001, "pct_cuda_time": 8.326669199208292, "invocations": 32 }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cuda_time_us": 501.01599999999996, "pct_cuda_time": 7.328709323853805, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cuda_time_us": 68.224, "pct_cuda_time": 0.9979598753544838, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 4306.594999999999, "pct_cuda_time": 62.99555888546908, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 2620.2509999999993, "pct_cuda_time": 38.32823289982207, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 2620.2509999999993, "pct_cuda_time": 38.32823289982207, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 291.67400000000004, "pct_cuda_time": 4.266518361341225, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 291.67400000000004, "pct_cuda_time": 4.266518361341225, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 1394.6699999999998, "pct_cuda_time": 20.400807624305784, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 1394.6699999999998, "pct_cuda_time": 20.400807624305784, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 349.083, "pct_cuda_time": 5.106279713420047, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 5.6, "pct_cuda_time": 0.08191509295827142, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 0.736, "pct_cuda_time": 
0.010765983645944244, "invocations": 1 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 342.747, "pct_cuda_time": 5.013598636815832, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 119.422, "pct_cuda_time": 1.7468686127254802, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 5.374999999999999, "pct_cuda_time": 0.07862386154476943, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 4.833, "pct_cuda_time": 0.07069565076202246, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 6.4, "pct_cuda_time": 0.09361724909516735, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 34.976, "pct_cuda_time": 0.5116182663050894, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 28.351, "pct_cuda_time": 0.4147097857964202, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 2.016, "pct_cuda_time": 0.02948943346497771, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, 
c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cuda_time_us": 7.392, "pct_cuda_time": 0.10812792270491828, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 27.615, "pct_cuda_time": 0.40394380215047593, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 2.464, "pct_cuda_time": 0.036042640901639424, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 84812.452, "cuda_time_us": 6367.842000000001, "pct_cuda_time": 93.14685167385447, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 365.763, "cuda_time_us": 7.488, "pct_cuda_time": 0.10953218144134579, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 7.488, "pct_cuda_time": 0.10953218144134579, "trace": "index_select(bfloat16[128256, 4096], 0, int64[8]) <- embedding(bfloat16[128256, 4096], int64[8], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 5171.138, "cuda_time_us": 207.45299999999997, "pct_cuda_time": 3.0345592463343354, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 356.146, "cuda_time_us": 4.16, "pct_cuda_time": 0.06085121191185877, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.16, "pct_cuda_time": 0.06085121191185877, "trace": "_C::rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 3804.221, "cuda_time_us": 64.127, "pct_cuda_time": 0.9380302082384054, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 823.026, "cuda_time_us": 25.792, "pct_cuda_time": 0.37727751385352437, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 25.792, "pct_cuda_time": 0.37727751385352437, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 1054.458, "cuda_time_us": 3.84, "pct_cuda_time": 0.0561703494571004, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.0561703494571004, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1235.124, "cuda_time_us": 15.808, "pct_cuda_time": 0.23123460526506331, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.03510646841068775, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.936, "pct_cuda_time": 0.17459616956248708, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.021531967291888488, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 382.872, "cuda_time_us": 18.686999999999998, "pct_cuda_time": 0.27334773966271747, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 16.575, "pct_cuda_time": 0.2424540474613123, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": 
{ "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 130.436, "cuda_time_us": 3.168, "pct_cuda_time": 0.04634053830210783, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04634053830210783, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 715.324, "cuda_time_us": 135.998, "pct_cuda_time": 1.9893372878819635, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 300.09, "cuda_time_us": 83.615, "pct_cuda_time": 1.2230947317331902, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.615, "pct_cuda_time": 1.2230947317331902, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 161.096, "cuda_time_us": 8.96, "pct_cuda_time": 0.1310641487332343, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.96, "pct_cuda_time": 0.1310641487332343, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 176.02, "cuda_time_us": 43.423, "pct_cuda_time": 0.6351784074155393, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.423, "pct_cuda_time": 0.6351784074155393, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2693.506, "cuda_time_us": 201.887, "pct_cuda_time": 2.9531414950118826, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.301, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, 
"pct_cuda_time": 0.04400010707472865, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1912.746, "cuda_time_us": 57.248, "pct_cuda_time": 0.8374062931562718, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 151.92, "cuda_time_us": 20.383, "pct_cuda_time": 0.29815631067293685, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.383, "pct_cuda_time": 0.29815631067293685, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 550.734, "cuda_time_us": 3.776, "pct_cuda_time": 0.055234176966148726, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.055234176966148726, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 842.796, "cuda_time_us": 15.137, "pct_cuda_time": 0.22141942180524185, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03744689963806694, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.201, "pct_cuda_time": 0.16384481361171396, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, 
None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.376, "pct_cuda_time": 0.020127708555460977, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 183.755, "cuda_time_us": 17.951999999999998, "pct_cuda_time": 0.2625963837119443, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.808, "pct_cuda_time": 0.23123460526506331, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.03136177844688106, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.771, "cuda_time_us": 3.136, "pct_cuda_time": 0.045872452056632, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.045872452056632, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 510.354, "cuda_time_us": 138.495, "pct_cuda_time": 2.02586264272425, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 185.396, "cuda_time_us": 84.799, "pct_cuda_time": 1.2404139228157962, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.799, "pct_cuda_time": 1.2404139228157962, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { 
"name": "SiluAndMul", "cpu_time_us": 106.497, "cuda_time_us": 9.088, "pct_cuda_time": 0.1329364937151376, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.088, "pct_cuda_time": 0.1329364937151376, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 151.717, "cuda_time_us": 44.608, "pct_cuda_time": 0.6525122261933163, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.608, "pct_cuda_time": 0.6525122261933163, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2540.4, "cuda_time_us": 200.41299999999998, "pct_cuda_time": 2.9315802723296516, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 107.781, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1743.582, "cuda_time_us": 57.536, "pct_cuda_time": 0.8416190693655543, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 148.12, "cuda_time_us": 20.896, "pct_cuda_time": 0.3056603182957214, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.896, "pct_cuda_time": 0.3056603182957214, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 535.789, "cuda_time_us": 3.744, "pct_cuda_time": 0.05476609072067289, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.744, "pct_cuda_time": 0.05476609072067289, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 731.141, "cuda_time_us": 14.943999999999999, "pct_cuda_time": 0.21859627663721573, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 
0.036510727147115264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.168, "pct_cuda_time": 0.163362099671067, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 164.573, "cuda_time_us": 17.952, "pct_cuda_time": 0.2625963837119444, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.839, "pct_cuda_time": 0.23168806381536805, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, 
long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.113, "pct_cuda_time": 0.030908319896576338, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.05, "cuda_time_us": 3.136, "pct_cuda_time": 0.045872452056632, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.045872452056632, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 482.644, "cuda_time_us": 136.66899999999998, "pct_cuda_time": 1.999152471341785, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 168.499, "cuda_time_us": 83.838, "pct_cuda_time": 1.2263567077563498, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.838, "pct_cuda_time": 1.2263567077563498, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 109.736, "cuda_time_us": 9.216, "pct_cuda_time": 0.13480883869704097, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.216, "pct_cuda_time": 0.13480883869704097, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.592, "cuda_time_us": 43.615, "pct_cuda_time": 0.6379869248883943, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.615, "pct_cuda_time": 0.6379869248883943, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2459.178, "cuda_time_us": 198.779, "pct_cuda_time": 2.907678618420042, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.172, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1753.941, "cuda_time_us": 57.758, "pct_cuda_time": 0.844866417693543, "trace": "" }, "children": [ { "entry": { 
"name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.601, "cuda_time_us": 21.28, "pct_cuda_time": 0.3112773532414314, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.28, "pct_cuda_time": 0.3112773532414314, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 518.29, "cuda_time_us": 3.615, "pct_cuda_time": 0.05287911804359843, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.615, "pct_cuda_time": 0.05287911804359843, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 766.693, "cuda_time_us": 15.039, "pct_cuda_time": 0.21998590767847212, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036510727147115264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.231, "pct_cuda_time": 0.16428364446684757, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, 
at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019191536064509306, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 168.743, "cuda_time_us": 17.823999999999998, "pct_cuda_time": 0.26072403873004096, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.712, "pct_cuda_time": 0.22983034652863582, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.916, "cuda_time_us": 3.103, "pct_cuda_time": 0.04538973811598504, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.103, "pct_cuda_time": 0.04538973811598504, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 471.195, "cuda_time_us": 134.878, "pct_cuda_time": 1.9729542692903093, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 168.281, "cuda_time_us": 82.175, "pct_cuda_time": 1.2020308506867774, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.175, "pct_cuda_time": 1.2020308506867774, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.345, "cuda_time_us": 9.28, "pct_cuda_time": 0.13574501118799265, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.28, 
"pct_cuda_time": 0.13574501118799265, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.06, "cuda_time_us": 43.423, "pct_cuda_time": 0.6351784074155393, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.423, "pct_cuda_time": 0.6351784074155393, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2422.505, "cuda_time_us": 199.421, "pct_cuda_time": 2.9170695987199005, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.918, "cuda_time_us": 3.071, "pct_cuda_time": 0.044921651870509206, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.071, "pct_cuda_time": 0.044921651870509206, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1694.108, "cuda_time_us": 58.688, "pct_cuda_time": 0.8584701742026845, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 143.651, "cuda_time_us": 22.24, "pct_cuda_time": 0.32531994060570646, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.24, "pct_cuda_time": 0.32531994060570646, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 477.339, "cuda_time_us": 3.84, "pct_cuda_time": 0.0561703494571004, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.0561703494571004, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 739.174, "cuda_time_us": 14.943999999999999, "pct_cuda_time": 0.21859627663721573, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036510727147115264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 
32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.168, "pct_cuda_time": 0.163362099671067, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 167.734, "cuda_time_us": 17.664, "pct_cuda_time": 0.25838360750266187, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.552, "pct_cuda_time": 0.22748991530125662, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": 
"RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.053, "cuda_time_us": 3.136, "pct_cuda_time": 0.045872452056632, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.045872452056632, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 498.156, "cuda_time_us": 134.526, "pct_cuda_time": 1.9678053205900752, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 174.4, "cuda_time_us": 81.822, "pct_cuda_time": 1.1968672742913722, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.822, "pct_cuda_time": 1.1968672742913722, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.219, "cuda_time_us": 9.184, "pct_cuda_time": 0.13434075245156513, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.184, "pct_cuda_time": 0.13434075245156513, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 147.867, "cuda_time_us": 43.52, "pct_cuda_time": 0.6365972938471379, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.52, "pct_cuda_time": 0.6365972938471379, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2640.671, "cuda_time_us": 197.53300000000002, "pct_cuda_time": 2.889452510236827, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.178, "cuda_time_us": 3.041, "pct_cuda_time": 0.0444828210153756, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.041, "pct_cuda_time": 0.0444828210153756, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1907.797, "cuda_time_us": 57.086, "pct_cuda_time": 0.8350366065385504, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 146.532, "cuda_time_us": 20.416, "pct_cuda_time": 0.2986390246135838, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", 
"cpu_time_us": 0, "cuda_time_us": 20.416, "pct_cuda_time": 0.2986390246135838, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 542.477, "cuda_time_us": 3.935, "pct_cuda_time": 0.0575599804983568, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.935, "pct_cuda_time": 0.0575599804983568, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 878.984, "cuda_time_us": 15.103, "pct_cuda_time": 0.2209220801694238, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.0369788133925911, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.231, "pct_cuda_time": 0.16428364446684757, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.019659622309985143, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], 
bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 184.829, "cuda_time_us": 17.631999999999998, "pct_cuda_time": 0.257915521257186, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.52, "pct_cuda_time": 0.22702182905578078, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.276, "cuda_time_us": 3.616, "pct_cuda_time": 0.052893745738769544, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.052893745738769544, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 490.224, "cuda_time_us": 133.79000000000002, "pct_cuda_time": 1.9570393369441312, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 176.317, "cuda_time_us": 80.799, "pct_cuda_time": 1.1819031421313166, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.799, "pct_cuda_time": 1.1819031421313166, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 111.061, "cuda_time_us": 8.992, "pct_cuda_time": 0.13153223497871014, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.13153223497871014, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.89, "cuda_time_us": 43.999, "pct_cuda_time": 0.6436039598341043, "trace": "" }, "children": [ { 
"entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.999, "pct_cuda_time": 0.6436039598341043, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2418.716, "cuda_time_us": 198.78, "pct_cuda_time": 2.907693246115213, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.24, "cuda_time_us": 3.071, "pct_cuda_time": 0.044921651870509206, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.071, "pct_cuda_time": 0.044921651870509206, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1685.213, "cuda_time_us": 58.271, "pct_cuda_time": 0.8523704253163276, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.284, "cuda_time_us": 21.472, "pct_cuda_time": 0.31408587071428645, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.472, "pct_cuda_time": 0.31408587071428645, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 494.757, "cuda_time_us": 3.776, "pct_cuda_time": 0.055234176966148726, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.055234176966148726, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 732.483, "cuda_time_us": 14.943999999999999, "pct_cuda_time": 0.21859627663721573, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036510727147115264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, 
cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.168, "pct_cuda_time": 0.163362099671067, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 158.528, "cuda_time_us": 18.079, "pct_cuda_time": 0.2644541009986766, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.967, "pct_cuda_time": 0.2335604087972714, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 108.193, "cuda_time_us": 3.264, "pct_cuda_time": 0.04774479703853534, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 
const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.264, "pct_cuda_time": 0.04774479703853534, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 477.454, "cuda_time_us": 134.174, "pct_cuda_time": 1.9626563718898409, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 172.089, "cuda_time_us": 82.111, "pct_cuda_time": 1.2010946781958258, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.111, "pct_cuda_time": 1.2010946781958258, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.718, "cuda_time_us": 9.024, "pct_cuda_time": 0.13200032122418592, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.024, "pct_cuda_time": 0.13200032122418592, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.744, "cuda_time_us": 43.039, "pct_cuda_time": 0.6295613724698292, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.039, "pct_cuda_time": 0.6295613724698292, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2420.477, "cuda_time_us": 198.014, "pct_cuda_time": 2.8964884316141353, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.607, "cuda_time_us": 3.2, "pct_cuda_time": 0.04680862454758367, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04680862454758367, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1723.333, "cuda_time_us": 57.248, "pct_cuda_time": 0.8374062931562718, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 145.396, "cuda_time_us": 20.608, "pct_cuda_time": 0.30144754208643887, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.608, "pct_cuda_time": 0.30144754208643887, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", 
"cpu_time_us": 505.25, "cuda_time_us": 3.775, "pct_cuda_time": 0.05521954927097761, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.775, "pct_cuda_time": 0.05521954927097761, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 748.862, "cuda_time_us": 15.169, "pct_cuda_time": 0.22188750805071772, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.624, "pct_cuda_time": 0.03838307212901861, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.232, "pct_cuda_time": 0.16429827216201867, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.313, "pct_cuda_time": 0.01920616375968042, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], 
bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 167.488, "cuda_time_us": 17.695999999999998, "pct_cuda_time": 0.25885169374813766, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.552, "pct_cuda_time": 0.22748991530125662, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.03136177844688106, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.962, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 475.182, "cuda_time_us": 134.494, "pct_cuda_time": 1.9673372343445992, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 162.208, "cuda_time_us": 81.567, "pct_cuda_time": 1.1931372120227366, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.567, "pct_cuda_time": 1.1931372120227366, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.813, "cuda_time_us": 9.024, "pct_cuda_time": 0.13200032122418592, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.024, "pct_cuda_time": 0.13200032122418592, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.191, "cuda_time_us": 43.903, "pct_cuda_time": 0.6421997010976768, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.903, "pct_cuda_time": 0.6421997010976768, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 
4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2373.38, "cuda_time_us": 199.291, "pct_cuda_time": 2.9151679983476555, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.772, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1685.185, "cuda_time_us": 58.717, "pct_cuda_time": 0.858894377362647, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.083, "cuda_time_us": 21.567, "pct_cuda_time": 0.3154755017555428, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.567, "pct_cuda_time": 0.3154755017555428, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 478.563, "cuda_time_us": 3.872, "pct_cuda_time": 0.056638435702576234, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.872, "pct_cuda_time": 0.056638435702576234, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 745.0, "cuda_time_us": 15.168, "pct_cuda_time": 0.2218728803555466, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.72, "pct_cuda_time": 0.03978733086544612, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, 
float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.168, "pct_cuda_time": 0.163362099671067, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 158.672, "cuda_time_us": 18.11, "pct_cuda_time": 0.2649075595489813, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.775, "pct_cuda_time": 0.23075189132441637, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.335, "pct_cuda_time": 0.03415566822456496, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.402, "cuda_time_us": 3.136, "pct_cuda_time": 0.045872452056632, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.045872452056632, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 467.111, "cuda_time_us": 134.366, "pct_cuda_time": 1.965464889362696, 
"trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.976, "cuda_time_us": 82.143, "pct_cuda_time": 1.2015627644413016, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.143, "pct_cuda_time": 1.2015627644413016, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.256, "cuda_time_us": 9.12, "pct_cuda_time": 0.13340457996061345, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.12, "pct_cuda_time": 0.13340457996061345, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 152.234, "cuda_time_us": 43.103, "pct_cuda_time": 0.6304975449607809, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.103, "pct_cuda_time": 0.6304975449607809, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2589.826, "cuda_time_us": 196.603, "pct_cuda_time": 2.875848753727685, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.353, "cuda_time_us": 3.103, "pct_cuda_time": 0.04538973811598504, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.103, "pct_cuda_time": 0.04538973811598504, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1826.0, "cuda_time_us": 57.150999999999996, "pct_cuda_time": 0.8359874067246732, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 230.473, "cuda_time_us": 20.672, "pct_cuda_time": 0.30238371457739055, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.672, "pct_cuda_time": 0.30238371457739055, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 523.983, "cuda_time_us": 3.776, "pct_cuda_time": 0.055234176966148726, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 
0.055234176966148726, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 742.234, "cuda_time_us": 15.007, "pct_cuda_time": 0.2195178214329963, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.495, "pct_cuda_time": 0.03649609945194414, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.232, "pct_cuda_time": 0.16429827216201867, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 166.219, "cuda_time_us": 17.695999999999998, "pct_cuda_time": 0.25885169374813766, "trace": "" }, "children": [ { "entry": { "name": "void 
cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.584, "pct_cuda_time": 0.22795800154673246, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.087, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 531.167, "cuda_time_us": 133.341, "pct_cuda_time": 1.9504715018122982, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.656, "cuda_time_us": 80.959, "pct_cuda_time": 1.1842435733586958, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.959, "pct_cuda_time": 1.1842435733586958, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.197, "cuda_time_us": 9.119, "pct_cuda_time": 0.13338995226544234, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.119, "pct_cuda_time": 0.13338995226544234, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 208.939, "cuda_time_us": 43.263, "pct_cuda_time": 0.6328379761881601, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.263, "pct_cuda_time": 0.6328379761881601, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2432.062, "cuda_time_us": 199.839, "pct_cuda_time": 2.923183975301429, "trace": "" }, "children": [ { "entry": { "name": 
"RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.642, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1703.592, "cuda_time_us": 58.464, "pct_cuda_time": 0.8551935704843536, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 156.876, "cuda_time_us": 22.112, "pct_cuda_time": 0.3234475956238031, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.112, "pct_cuda_time": 0.3234475956238031, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 479.507, "cuda_time_us": 3.68, "pct_cuda_time": 0.05382991822972122, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05382991822972122, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 748.669, "cuda_time_us": 15.008, "pct_cuda_time": 0.2195324491281674, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03744689963806694, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.168, "pct_cuda_time": 0.163362099671067, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 153.965, "cuda_time_us": 17.664, "pct_cuda_time": 0.25838360750266187, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.552, "pct_cuda_time": 0.22748991530125662, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.378, "cuda_time_us": 3.136, "pct_cuda_time": 0.045872452056632, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.045872452056632, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 505.406, "cuda_time_us": 135.199, "pct_cuda_time": 1.977649759440239, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 172.57, "cuda_time_us": 82.336, "pct_cuda_time": 1.204385909609328, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.336, "pct_cuda_time": 1.204385909609328, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.037, "cuda_time_us": 8.992, "pct_cuda_time": 0.13153223497871014, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.13153223497871014, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 163.195, "cuda_time_us": 43.871, "pct_cuda_time": 0.641731614852201, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.871, "pct_cuda_time": 0.641731614852201, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2595.509, "cuda_time_us": 197.88500000000002, "pct_cuda_time": 2.894601458937061, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.279, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1896.811, "cuda_time_us": 56.96000000000001, "pct_cuda_time": 0.8331935169469893, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.078, "cuda_time_us": 20.545, "pct_cuda_time": 0.3005259972906583, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.545, "pct_cuda_time": 0.3005259972906583, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 505.086, "cuda_time_us": 3.648, "pct_cuda_time": 0.053361831984245385, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.053361831984245385, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 820.755, "cuda_time_us": 15.04, 
"pct_cuda_time": 0.22000053537364322, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.0369788133925911, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.232, "pct_cuda_time": 0.16429827216201867, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 172.356, "cuda_time_us": 17.727, "pct_cuda_time": 0.2593051522984424, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.584, "pct_cuda_time": 0.22795800154673246, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- 
linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.143, "pct_cuda_time": 0.031347150751709935, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.694, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 483.058, "cuda_time_us": 134.845, "pct_cuda_time": 1.9724715553496623, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 173.223, "cuda_time_us": 82.111, "pct_cuda_time": 1.2010946781958258, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.111, "pct_cuda_time": 1.2010946781958258, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 114.433, "cuda_time_us": 9.151, "pct_cuda_time": 0.13385803851091818, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.151, "pct_cuda_time": 0.13385803851091818, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 143.812, "cuda_time_us": 43.583, "pct_cuda_time": 0.6375188386429185, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.583, "pct_cuda_time": 0.6375188386429185, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2621.696, "cuda_time_us": 198.878, "pct_cuda_time": 2.909126760241983, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.486, "cuda_time_us": 3.073, "pct_cuda_time": 0.04495090726085144, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, 
c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.073, "pct_cuda_time": 0.04495090726085144, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1902.508, "cuda_time_us": 58.686, "pct_cuda_time": 0.8584409188123423, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.298, "cuda_time_us": 22.208, "pct_cuda_time": 0.3248518543602307, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.208, "pct_cuda_time": 0.3248518543602307, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 479.697, "cuda_time_us": 3.775, "pct_cuda_time": 0.05521954927097761, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.775, "pct_cuda_time": 0.05521954927097761, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 946.886, "cuda_time_us": 15.039, "pct_cuda_time": 0.21998590767847212, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.624, "pct_cuda_time": 0.03838307212901861, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.135, "pct_cuda_time": 0.16287938573042005, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 
128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 173.055, "cuda_time_us": 17.664, "pct_cuda_time": 0.25838360750266187, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.52, "pct_cuda_time": 0.22702182905578078, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.03136177844688106, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.85, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 484.525, "cuda_time_us": 134.047, "pct_cuda_time": 1.9607986546031086, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 177.966, "cuda_time_us": 81.919, "pct_cuda_time": 1.1982861607229707, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.919, "pct_cuda_time": 1.1982861607229707, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 
4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.721, "cuda_time_us": 9.184, "pct_cuda_time": 0.13434075245156513, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.184, "pct_cuda_time": 0.13434075245156513, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.125, "cuda_time_us": 42.944, "pct_cuda_time": 0.6281717414285729, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 42.944, "pct_cuda_time": 0.6281717414285729, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2485.21, "cuda_time_us": 197.31000000000003, "pct_cuda_time": 2.886190534213667, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.545, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1745.469, "cuda_time_us": 57.024, "pct_cuda_time": 0.8341296894379411, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 151.512, "cuda_time_us": 20.512, "pct_cuda_time": 0.3000432833500113, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.512, "pct_cuda_time": 0.3000432833500113, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 483.86, "cuda_time_us": 3.68, "pct_cuda_time": 0.05382991822972122, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05382991822972122, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 771.543, "cuda_time_us": 15.136, "pct_cuda_time": 0.22140479411007075, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", 
"cpu_time_us": 0, "cuda_time_us": 2.624, "pct_cuda_time": 0.03838307212901861, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.2, "pct_cuda_time": 0.16383018591654283, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019191536064509306, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 166.88, "cuda_time_us": 17.695999999999998, "pct_cuda_time": 0.25885169374813766, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.584, "pct_cuda_time": 0.22795800154673246, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float 
const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.361, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 497.002, "cuda_time_us": 134.20600000000002, "pct_cuda_time": 1.9631244581353169, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.212, "cuda_time_us": 81.759, "pct_cuda_time": 1.1959457294955915, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.759, "pct_cuda_time": 1.1959457294955915, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.218, "cuda_time_us": 8.991, "pct_cuda_time": 0.13151760728353898, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.991, "pct_cuda_time": 0.13151760728353898, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 161.575, "cuda_time_us": 43.456, "pct_cuda_time": 0.6356611213561862, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.456, "pct_cuda_time": 0.6356611213561862, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2547.914, "cuda_time_us": 198.94, "pct_cuda_time": 2.910033677342592, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.039, "cuda_time_us": 3.104, "pct_cuda_time": 0.04540436581115616, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04540436581115616, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1835.944, "cuda_time_us": 
57.983, "pct_cuda_time": 0.8481576491070449, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 179.506, "cuda_time_us": 21.279, "pct_cuda_time": 0.3112627255462603, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.279, "pct_cuda_time": 0.3112627255462603, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 517.639, "cuda_time_us": 3.84, "pct_cuda_time": 0.0561703494571004, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.0561703494571004, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 810.244, "cuda_time_us": 15.04, "pct_cuda_time": 0.22000053537364322, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036510727147115264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.136, "pct_cuda_time": 0.16289401342559115, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { 
"entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.408, "pct_cuda_time": 0.020595794800936814, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 159.494, "cuda_time_us": 17.823999999999998, "pct_cuda_time": 0.26072403873004096, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.68, "pct_cuda_time": 0.22936226028315998, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.03136177844688106, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.871, "cuda_time_us": 3.232, "pct_cuda_time": 0.047276710793059507, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.047276710793059507, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 481.015, "cuda_time_us": 134.62099999999998, "pct_cuda_time": 1.9691949516313312, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 177.69, "cuda_time_us": 82.014, "pct_cuda_time": 1.1996757917642271, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.014, "pct_cuda_time": 1.1996757917642271, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.913, "cuda_time_us": 9.408, "pct_cuda_time": 0.13761735616989598, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), 
true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.408, "pct_cuda_time": 0.13761735616989598, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.519, "cuda_time_us": 43.199, "pct_cuda_time": 0.6319018036972084, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.199, "pct_cuda_time": 0.6319018036972084, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2475.111, "cuda_time_us": 196.79699999999997, "pct_cuda_time": 2.8786865265908816, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.309, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1728.502, "cuda_time_us": 56.830999999999996, "pct_cuda_time": 0.8313065442699148, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.233, "cuda_time_us": 20.511, "pct_cuda_time": 0.3000286556548402, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.511, "pct_cuda_time": 0.3000286556548402, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 494.886, "cuda_time_us": 3.648, "pct_cuda_time": 0.053361831984245385, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.053361831984245385, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 766.059, "cuda_time_us": 15.104, "pct_cuda_time": 0.2209367078645949, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036510727147115264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- 
vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.232, "pct_cuda_time": 0.16429827216201867, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.376, "pct_cuda_time": 0.020127708555460977, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 168.827, "cuda_time_us": 17.567999999999998, "pct_cuda_time": 0.2569793487662343, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.424, "pct_cuda_time": 0.22561757031935328, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.03136177844688106, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], 
bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.79, "cuda_time_us": 3.2, "pct_cuda_time": 0.04680862454758367, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04680862454758367, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 507.437, "cuda_time_us": 133.75799999999998, "pct_cuda_time": 1.956571250698655, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 187.944, "cuda_time_us": 81.279, "pct_cuda_time": 1.1889244358134539, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.279, "pct_cuda_time": 1.1889244358134539, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.522, "cuda_time_us": 8.991, "pct_cuda_time": 0.13151760728353898, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.991, "pct_cuda_time": 0.13151760728353898, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 155.716, "cuda_time_us": 43.488, "pct_cuda_time": 0.636129207601662, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.488, "pct_cuda_time": 0.636129207601662, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2553.231, "cuda_time_us": 198.90999999999997, "pct_cuda_time": 2.909594846487458, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.701, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1839.781, "cuda_time_us": 58.432, "pct_cuda_time": 0.8547254842388777, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 157.13, "cuda_time_us": 21.824, "pct_cuda_time": 0.31923481941452064, "trace": "" }, "children": [ { 
"entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.824, "pct_cuda_time": 0.31923481941452064, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 539.481, "cuda_time_us": 3.68, "pct_cuda_time": 0.05382991822972122, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05382991822972122, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 799.953, "cuda_time_us": 15.231, "pct_cuda_time": 0.22279442515132716, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.0369788133925911, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.36, "pct_cuda_time": 0.16617061714392203, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.343, "pct_cuda_time": 0.01964499461481402, "trace": 
"fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 169.973, "cuda_time_us": 17.697, "pct_cuda_time": 0.2588663214433088, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.584, "pct_cuda_time": 0.22795800154673246, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.113, "pct_cuda_time": 0.030908319896576338, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.69, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 480.019, "cuda_time_us": 134.36599999999999, "pct_cuda_time": 1.9654648893626958, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 166.874, "cuda_time_us": 81.759, "pct_cuda_time": 1.1959457294955915, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.759, "pct_cuda_time": 1.1959457294955915, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 113.726, "cuda_time_us": 9.023, "pct_cuda_time": 0.13198569352901482, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.023, "pct_cuda_time": 0.13198569352901482, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": 
"RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.464, "cuda_time_us": 43.584, "pct_cuda_time": 0.6375334663380896, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.584, "pct_cuda_time": 0.6375334663380896, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2358.838, "cuda_time_us": 196.89000000000001, "pct_cuda_time": 2.8800469022417965, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.5, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1660.672, "cuda_time_us": 57.181, "pct_cuda_time": 0.8364262375798067, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.184, "cuda_time_us": 20.447, "pct_cuda_time": 0.29909248316388853, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.447, "pct_cuda_time": 0.29909248316388853, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 464.194, "cuda_time_us": 3.711, "pct_cuda_time": 0.05428337678002593, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.711, "pct_cuda_time": 0.05428337678002593, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 728.563, "cuda_time_us": 15.232, "pct_cuda_time": 0.22280905284649827, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.0369788133925911, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, 
cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.424, "pct_cuda_time": 0.1671067896348737, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 164.948, "cuda_time_us": 17.791, "pct_cuda_time": 0.26024132478939405, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.647, "pct_cuda_time": 0.228879546342513, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.03136177844688106, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 93.406, "cuda_time_us": 3.2, "pct_cuda_time": 0.04680862454758367, "trace": "" }, "children": [ { 
"entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04680862454758367, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 458.642, "cuda_time_us": 133.501, "pct_cuda_time": 1.9528119330396772, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.006, "cuda_time_us": 81.247, "pct_cuda_time": 1.1884563495679783, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.247, "pct_cuda_time": 1.1884563495679783, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.892, "cuda_time_us": 8.991, "pct_cuda_time": 0.13151760728353898, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.991, "pct_cuda_time": 0.13151760728353898, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.794, "cuda_time_us": 43.263, "pct_cuda_time": 0.6328379761881601, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.263, "pct_cuda_time": 0.6328379761881601, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2448.52, "cuda_time_us": 198.84699999999998, "pct_cuda_time": 2.9086733016916777, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.584, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1734.03, "cuda_time_us": 57.983999999999995, "pct_cuda_time": 0.8481722768022161, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.86, "cuda_time_us": 21.504, "pct_cuda_time": 0.3145539569597623, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.504, "pct_cuda_time": 0.3145539569597623, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- 
matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 506.911, "cuda_time_us": 3.712, "pct_cuda_time": 0.05429800447519706, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05429800447519706, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 764.393, "cuda_time_us": 15.072, "pct_cuda_time": 0.22046862161911907, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036510727147115264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.2, "pct_cuda_time": 0.16383018591654283, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.376, "pct_cuda_time": 0.020127708555460977, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, 
None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 167.27, "cuda_time_us": 17.695999999999998, "pct_cuda_time": 0.25885169374813766, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.584, "pct_cuda_time": 0.22795800154673246, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.796, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 480.792, "cuda_time_us": 134.75099999999998, "pct_cuda_time": 1.9710965520035768, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.699, "cuda_time_us": 81.279, "pct_cuda_time": 1.1889244358134539, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.279, "pct_cuda_time": 1.1889244358134539, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 104.045, "cuda_time_us": 9.056, "pct_cuda_time": 0.13246840746966176, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.056, "pct_cuda_time": 0.13246840746966176, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 155.145, "cuda_time_us": 44.416, "pct_cuda_time": 0.6497037087204613, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.416, "pct_cuda_time": 0.6497037087204613, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2710.591, "cuda_time_us": 197.40499999999997, "pct_cuda_time": 2.8875801652549224, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.964, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1767.844, "cuda_time_us": 57.278, "pct_cuda_time": 0.8378451240114054, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 142.551, "cuda_time_us": 20.639, "pct_cuda_time": 0.3019010006367435, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.639, "pct_cuda_time": 0.3019010006367435, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 469.66, "cuda_time_us": 3.872, "pct_cuda_time": 0.056638435702576234, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.872, "pct_cuda_time": 0.056638435702576234, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 841.217, "cuda_time_us": 14.975, "pct_cuda_time": 0.21904973518752044, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036510727147115264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, 
cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.199, "pct_cuda_time": 0.16381555822137173, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 166.139, "cuda_time_us": 17.792, "pct_cuda_time": 0.26025595248456523, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.679, "pct_cuda_time": 0.22934763258798888, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.113, "pct_cuda_time": 0.030908319896576338, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.838, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, 
int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 701.372, "cuda_time_us": 134.015, "pct_cuda_time": 1.9603305683576326, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.439, "cuda_time_us": 81.631, "pct_cuda_time": 1.1940733845136882, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.631, "pct_cuda_time": 1.1940733845136882, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.303, "cuda_time_us": 8.928, "pct_cuda_time": 0.13059606248775846, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.13059606248775846, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 374.204, "cuda_time_us": 43.456, "pct_cuda_time": 0.6356611213561862, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.456, "pct_cuda_time": 0.6356611213561862, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2557.553, "cuda_time_us": 199.06899999999996, "pct_cuda_time": 2.911920650019666, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.705, "cuda_time_us": 3.168, "pct_cuda_time": 0.04634053830210783, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04634053830210783, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1818.976, "cuda_time_us": 58.047, "pct_cuda_time": 0.8490938215979966, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 149.223, "cuda_time_us": 21.375, "pct_cuda_time": 0.3126669842826878, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.375, "pct_cuda_time": 0.3126669842826878, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", 
"cpu_time_us": 547.208, "cuda_time_us": 3.68, "pct_cuda_time": 0.05382991822972122, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05382991822972122, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 791.382, "cuda_time_us": 15.2, "pct_cuda_time": 0.2223409666010224, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.624, "pct_cuda_time": 0.03838307212901861, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.296, "pct_cuda_time": 0.16523444465297035, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], 
bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 163.603, "cuda_time_us": 17.792, "pct_cuda_time": 0.26025595248456523, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.68, "pct_cuda_time": 0.22936226028315998, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.854, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.044936279565680325, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 498.681, "cuda_time_us": 134.78199999999998, "pct_cuda_time": 1.9715500105538817, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 181.014, "cuda_time_us": 81.919, "pct_cuda_time": 1.1982861607229707, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.919, "pct_cuda_time": 1.1982861607229707, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 104.739, "cuda_time_us": 9.312, "pct_cuda_time": 0.13621309743346846, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.312, "pct_cuda_time": 0.13621309743346846, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 148.029, "cuda_time_us": 43.551, "pct_cuda_time": 0.6370507523974427, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.551, "pct_cuda_time": 0.6370507523974427, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 
4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2346.548, "cuda_time_us": 198.493, "pct_cuda_time": 2.9034950976011014, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.365, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1666.848, "cuda_time_us": 57.119, "pct_cuda_time": 0.8355193204791974, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.539, "cuda_time_us": 20.544, "pct_cuda_time": 0.3005113695954872, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.544, "pct_cuda_time": 0.3005113695954872, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 479.046, "cuda_time_us": 3.68, "pct_cuda_time": 0.05382991822972122, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05382991822972122, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 730.907, "cuda_time_us": 15.072, "pct_cuda_time": 0.22046862161911907, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036510727147115264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, 
float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.264, "pct_cuda_time": 0.1647663584074945, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019191536064509306, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 155.98, "cuda_time_us": 17.823, "pct_cuda_time": 0.26070941103486994, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.679, "pct_cuda_time": 0.22934763258798888, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.03136177844688106, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.107, "cuda_time_us": 3.296, "pct_cuda_time": 0.04821288328401118, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.04821288328401118, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 456.647, "cuda_time_us": 135.07, "pct_cuda_time": 
1.9757627867631644, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.615, "cuda_time_us": 82.367, "pct_cuda_time": 1.2048393681596326, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.367, "pct_cuda_time": 1.2048393681596326, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.237, "cuda_time_us": 9.152, "pct_cuda_time": 0.13387266620608929, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.152, "pct_cuda_time": 0.13387266620608929, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.761, "cuda_time_us": 43.551, "pct_cuda_time": 0.6370507523974427, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.551, "pct_cuda_time": 0.6370507523974427, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2487.516, "cuda_time_us": 199.00400000000002, "pct_cuda_time": 2.910969849833544, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 88.838, "cuda_time_us": 3.103, "pct_cuda_time": 0.04538973811598504, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.103, "pct_cuda_time": 0.04538973811598504, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1748.452, "cuda_time_us": 57.95, "pct_cuda_time": 0.8476749351663979, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 149.0, "cuda_time_us": 21.472, "pct_cuda_time": 0.31408587071428645, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.472, "pct_cuda_time": 0.31408587071428645, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 463.074, "cuda_time_us": 3.712, "pct_cuda_time": 0.05429800447519706, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, 
"pct_cuda_time": 0.05429800447519706, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 777.219, "cuda_time_us": 15.167, "pct_cuda_time": 0.22185825266037548, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.03931924461997029, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.199, "pct_cuda_time": 0.16381555822137173, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 204.255, "cuda_time_us": 17.599, "pct_cuda_time": 0.25743280731653906, "trace": "" }, "children": [ { "entry": { "name": "void 
cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.487, "pct_cuda_time": 0.22653911511513383, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.618, "cuda_time_us": 3.105, "pct_cuda_time": 0.045418993506327276, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.105, "pct_cuda_time": 0.045418993506327276, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 490.572, "cuda_time_us": 134.846, "pct_cuda_time": 1.9724861830448335, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 173.08, "cuda_time_us": 82.174, "pct_cuda_time": 1.2020162229916065, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.174, "pct_cuda_time": 1.2020162229916065, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 109.385, "cuda_time_us": 9.44, "pct_cuda_time": 0.13808544241537182, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.44, "pct_cuda_time": 0.13808544241537182, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 147.29, "cuda_time_us": 43.232, "pct_cuda_time": 0.6323845176378554, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.232, "pct_cuda_time": 0.6323845176378554, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2477.206, "cuda_time_us": 197.243, "pct_cuda_time": 2.8852104786372017, "trace": "" }, "children": [ { "entry": { "name": 
"RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.993, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1783.644, "cuda_time_us": 57.054, "pct_cuda_time": 0.8345685202930745, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.416, "cuda_time_us": 20.639, "pct_cuda_time": 0.3019010006367435, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.639, "pct_cuda_time": 0.3019010006367435, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 529.166, "cuda_time_us": 3.68, "pct_cuda_time": 0.05382991822972122, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05382991822972122, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 780.146, "cuda_time_us": 15.007, "pct_cuda_time": 0.2195178214329963, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.527, "pct_cuda_time": 0.03696418569741998, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.2, "pct_cuda_time": 0.16383018591654283, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 161.387, "cuda_time_us": 17.728, "pct_cuda_time": 0.25931977999361355, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.584, "pct_cuda_time": 0.22795800154673246, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.03136177844688106, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.566, "cuda_time_us": 3.071, "pct_cuda_time": 0.044921651870509206, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.071, "pct_cuda_time": 0.044921651870509206, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 469.763, "cuda_time_us": 134.078, "pct_cuda_time": 1.9612521131534135, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 167.259, "cuda_time_us": 81.631, "pct_cuda_time": 1.1940733845136882, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.631, "pct_cuda_time": 1.1940733845136882, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.847, "cuda_time_us": 9.056, "pct_cuda_time": 0.13246840746966176, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.056, "pct_cuda_time": 0.13246840746966176, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.618, "cuda_time_us": 43.391, "pct_cuda_time": 0.6347103211700634, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.391, "pct_cuda_time": 0.6347103211700634, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2373.551, "cuda_time_us": 198.205, "pct_cuda_time": 2.8992823213918193, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 91.978, "cuda_time_us": 3.104, "pct_cuda_time": 0.04540436581115616, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04540436581115616, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1670.473, "cuda_time_us": 57.983000000000004, "pct_cuda_time": 0.8481576491070449, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 142.122, "cuda_time_us": 21.536, "pct_cuda_time": 0.31502204320523813, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.536, "pct_cuda_time": 0.31502204320523813, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 480.162, "cuda_time_us": 3.712, "pct_cuda_time": 0.05429800447519706, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05429800447519706, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 749.237, "cuda_time_us": 14.943, 
"pct_cuda_time": 0.21858164894204463, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036510727147115264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.167, "pct_cuda_time": 0.16334747197589589, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 152.91, "cuda_time_us": 17.792, "pct_cuda_time": 0.26025595248456523, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.68, "pct_cuda_time": 0.22936226028315998, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- 
linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.358, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 462.812, "cuda_time_us": 134.078, "pct_cuda_time": 1.9612521131534135, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.888, "cuda_time_us": 81.727, "pct_cuda_time": 1.1954776432501157, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.727, "pct_cuda_time": 1.1954776432501157, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.222, "cuda_time_us": 9.088, "pct_cuda_time": 0.1329364937151376, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.088, "pct_cuda_time": 0.1329364937151376, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.38, "cuda_time_us": 43.263, "pct_cuda_time": 0.6328379761881601, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.263, "pct_cuda_time": 0.6328379761881601, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2427.885, "cuda_time_us": 197.405, "pct_cuda_time": 2.8875801652549233, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.098, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, 
c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1732.768, "cuda_time_us": 57.312, "pct_cuda_time": 0.8383424656472235, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.56, "cuda_time_us": 20.672, "pct_cuda_time": 0.30238371457739055, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.672, "pct_cuda_time": 0.30238371457739055, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 493.471, "cuda_time_us": 3.68, "pct_cuda_time": 0.05382991822972122, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05382991822972122, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 751.679, "cuda_time_us": 15.04, "pct_cuda_time": 0.22000053537364322, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036510727147115264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.232, "pct_cuda_time": 0.16429827216201867, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 
128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019191536064509306, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 168.164, "cuda_time_us": 17.92, "pct_cuda_time": 0.2621282974664686, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.776, "pct_cuda_time": 0.23076651901958747, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.144, "pct_cuda_time": 0.03136177844688106, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.942, "cuda_time_us": 3.2, "pct_cuda_time": 0.04680862454758367, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04680862454758367, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 473.274, "cuda_time_us": 133.885, "pct_cuda_time": 1.9584289679853872, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.131, "cuda_time_us": 81.47, "pct_cuda_time": 1.191718325591138, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.47, "pct_cuda_time": 1.191718325591138, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], 
bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.453, "cuda_time_us": 9.344, "pct_cuda_time": 0.1366811836789443, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.344, "pct_cuda_time": 0.1366811836789443, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 149.722, "cuda_time_us": 43.071, "pct_cuda_time": 0.630029458715305, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.071, "pct_cuda_time": 0.630029458715305, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2338.99, "cuda_time_us": 198.718, "pct_cuda_time": 2.9067863290146034, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.866, "cuda_time_us": 3.104, "pct_cuda_time": 0.04540436581115616, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.04540436581115616, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1659.562, "cuda_time_us": 57.599, "pct_cuda_time": 0.842540614161335, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.366, "cuda_time_us": 21.376, "pct_cuda_time": 0.31268161197785893, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.376, "pct_cuda_time": 0.31268161197785893, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 468.08, "cuda_time_us": 3.84, "pct_cuda_time": 0.0561703494571004, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.0561703494571004, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 740.719, "cuda_time_us": 14.943, "pct_cuda_time": 0.21858164894204463, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, 
"cuda_time_us": 2.496, "pct_cuda_time": 0.036510727147115264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.167, "pct_cuda_time": 0.16334747197589589, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 155.729, "cuda_time_us": 17.439999999999998, "pct_cuda_time": 0.25510700378433093, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.328, "pct_cuda_time": 0.22421331158292576, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, 
__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.326, "cuda_time_us": 3.2, "pct_cuda_time": 0.04680862454758367, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04680862454758367, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 460.885, "cuda_time_us": 134.815, "pct_cuda_time": 1.9720327244945288, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.717, "cuda_time_us": 81.343, "pct_cuda_time": 1.1898606083044059, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.343, "pct_cuda_time": 1.1898606083044059, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.147, "cuda_time_us": 9.12, "pct_cuda_time": 0.13340457996061345, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.12, "pct_cuda_time": 0.13340457996061345, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.9, "cuda_time_us": 44.352, "pct_cuda_time": 0.6487675362295096, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.352, "pct_cuda_time": 0.6487675362295096, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2740.908, "cuda_time_us": 197.14999999999998, "pct_cuda_time": 2.8838501029862873, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.146, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2028.507, "cuda_time_us": 57.183, 
"pct_cuda_time": 0.8364554929701491, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 161.011, "cuda_time_us": 20.576, "pct_cuda_time": 0.300979455840963, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.576, "pct_cuda_time": 0.300979455840963, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 542.865, "cuda_time_us": 3.84, "pct_cuda_time": 0.0561703494571004, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.0561703494571004, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 986.673, "cuda_time_us": 15.007, "pct_cuda_time": 0.2195178214329963, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036510727147115264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.199, "pct_cuda_time": 0.16381555822137173, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { 
"name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019191536064509306, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 173.004, "cuda_time_us": 17.759999999999998, "pct_cuda_time": 0.25978786623908934, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.648, "pct_cuda_time": 0.22889417403768414, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.505, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 483.328, "cuda_time_us": 133.91899999999998, "pct_cuda_time": 1.958926309621205, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 175.495, "cuda_time_us": 81.663, "pct_cuda_time": 1.1945414707591642, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.663, "pct_cuda_time": 1.1945414707591642, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.901, "cuda_time_us": 8.928, "pct_cuda_time": 0.13059606248775846, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, 
c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.13059606248775846, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.771, "cuda_time_us": 43.328, "pct_cuda_time": 0.6337887763742829, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.328, "pct_cuda_time": 0.6337887763742829, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2408.68, "cuda_time_us": 198.301, "pct_cuda_time": 2.900686580128246, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.318, "cuda_time_us": 3.2, "pct_cuda_time": 0.04680862454758367, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04680862454758367, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1689.089, "cuda_time_us": 58.013999999999996, "pct_cuda_time": 0.8486111076573497, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.496, "cuda_time_us": 21.247, "pct_cuda_time": 0.31079463930078444, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.247, "pct_cuda_time": 0.31079463930078444, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 487.523, "cuda_time_us": 3.712, "pct_cuda_time": 0.05429800447519706, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05429800447519706, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 733.319, "cuda_time_us": 15.2, "pct_cuda_time": 0.2223409666010224, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.0369788133925911, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 
32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.264, "pct_cuda_time": 0.1647663584074945, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.408, "pct_cuda_time": 0.020595794800936814, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 173.027, "cuda_time_us": 17.855, "pct_cuda_time": 0.2611774972803457, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.712, "pct_cuda_time": 0.22983034652863582, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.143, "pct_cuda_time": 0.031347150751709935, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 
4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.178, "cuda_time_us": 3.2, "pct_cuda_time": 0.04680862454758367, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.04680862454758367, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 496.626, "cuda_time_us": 133.887, "pct_cuda_time": 1.9584582233757295, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 186.251, "cuda_time_us": 81.343, "pct_cuda_time": 1.1898606083044059, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.343, "pct_cuda_time": 1.1898606083044059, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.562, "cuda_time_us": 9.376, "pct_cuda_time": 0.13714926992442014, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.376, "pct_cuda_time": 0.13714926992442014, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 148.656, "cuda_time_us": 43.168, "pct_cuda_time": 0.6314483451469036, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.168, "pct_cuda_time": 0.6314483451469036, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2548.278, "cuda_time_us": 198.074, "pct_cuda_time": 2.8973660933244028, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.517, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1811.334, "cuda_time_us": 56.893, "pct_cuda_time": 0.8322134613705243, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.145, "cuda_time_us": 20.416, "pct_cuda_time": 0.2986390246135838, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.416, "pct_cuda_time": 0.2986390246135838, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 492.633, "cuda_time_us": 3.648, "pct_cuda_time": 0.053361831984245385, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.053361831984245385, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 766.295, "cuda_time_us": 15.069999999999999, "pct_cuda_time": 0.22043936622877683, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.527, "pct_cuda_time": 0.03696418569741998, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.231, "pct_cuda_time": 0.16428364446684757, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019191536064509306, "trace": 
"fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 254.309, "cuda_time_us": 17.759, "pct_cuda_time": 0.25977323854391826, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.647, "pct_cuda_time": 0.228879546342513, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.557, "cuda_time_us": 3.168, "pct_cuda_time": 0.04634053830210783, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04634053830210783, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 499.714, "cuda_time_us": 134.973, "pct_cuda_time": 1.974343900331566, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 177.007, "cuda_time_us": 81.502, "pct_cuda_time": 1.1921864118366137, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.502, "pct_cuda_time": 1.1921864118366137, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 104.131, "cuda_time_us": 9.12, "pct_cuda_time": 0.13340457996061345, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.12, "pct_cuda_time": 0.13340457996061345, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 
14336])", "cpu_time_us": 155.079, "cuda_time_us": 44.351, "pct_cuda_time": 0.6487529085343385, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.351, "pct_cuda_time": 0.6487529085343385, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2472.267, "cuda_time_us": 198.62400000000002, "pct_cuda_time": 2.905411325668519, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 90.773, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1759.666, "cuda_time_us": 58.241, "pct_cuda_time": 0.8519315944611938, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.37, "cuda_time_us": 21.504, "pct_cuda_time": 0.3145539569597623, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.504, "pct_cuda_time": 0.3145539569597623, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 511.902, "cuda_time_us": 3.84, "pct_cuda_time": 0.0561703494571004, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.0561703494571004, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 789.548, "cuda_time_us": 15.104999999999999, "pct_cuda_time": 0.220951335559766, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.593, "pct_cuda_time": 0.03792961357871389, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, 
cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.2, "pct_cuda_time": 0.16383018591654283, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019191536064509306, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 160.2, "cuda_time_us": 17.792, "pct_cuda_time": 0.26025595248456523, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.616, "pct_cuda_time": 0.2284260877922083, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.176, "pct_cuda_time": 0.0318298646923569, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.222, "cuda_time_us": 3.168, "pct_cuda_time": 0.04634053830210783, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04634053830210783, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 468.535, "cuda_time_us": 134.175, "pct_cuda_time": 1.9626709995850125, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.921, "cuda_time_us": 81.343, "pct_cuda_time": 1.1898606083044059, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.343, "pct_cuda_time": 1.1898606083044059, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.473, "cuda_time_us": 8.928, "pct_cuda_time": 0.13059606248775846, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.13059606248775846, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.371, "cuda_time_us": 43.904, "pct_cuda_time": 0.642214328792848, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.904, "pct_cuda_time": 0.642214328792848, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2483.514, "cuda_time_us": 197.185, "pct_cuda_time": 2.884362072317277, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.846, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04446819332020449, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1740.563, "cuda_time_us": 56.80200000000001, "pct_cuda_time": 0.8308823411099524, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.938, "cuda_time_us": 20.48, "pct_cuda_time": 0.2995751971045355, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.48, "pct_cuda_time": 0.2995751971045355, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[8, 4096], 
bfloat16[4096, 6144]) <- linear(bfloat16[8, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 495.386, "cuda_time_us": 3.713, "pct_cuda_time": 0.054312632170368184, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.713, "pct_cuda_time": 0.054312632170368184, "trace": "_C::rotary_embedding(int64[8], bfloat16[8, 4096], bfloat16[8, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 761.019, "cuda_time_us": 15.04, "pct_cuda_time": 0.22000053537364322, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036510727147115264, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[8], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.264, "pct_cuda_time": 0.1647663584074945, "trace": "_vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.01872344981903347, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[8, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[8, 1, 32, 128], None, None, None, None, int32[8], None, None, int32[8, 9], None, None, None, None, None, None, 
None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[8, 32, 128], bfloat16[8, 8, 128], bfloat16[8, 8, 128], bfloat16[8, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 175.732, "cuda_time_us": 17.569000000000003, "pct_cuda_time": 0.2569939764614055, "trace": "" }, "children": [ { "entry": { "name": "void cutlass::Kernel2(cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_64x4_tn_align8::Params)", "cpu_time_us": 0, "cuda_time_us": 15.457, "pct_cuda_time": 0.22610028426000026, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "void cublasLt::splitKreduce_kernel<32, 16, int, __nv_bfloat16, __nv_bfloat16, float, __nv_bfloat16, true, false, false>(cublasLt::cublasSplitKParams, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, float const*, float const*, __nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, void*, long, float*, int*)", "cpu_time_us": 0, "cuda_time_us": 2.112, "pct_cuda_time": 0.030893692201405223, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[8, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.804, "cuda_time_us": 3.296, "pct_cuda_time": 0.04821288328401118, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.04821288328401118, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 507.849, "cuda_time_us": 134.047, "pct_cuda_time": 1.9607986546031086, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 172.243, "cuda_time_us": 80.607, "pct_cuda_time": 1.1790946246584615, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.607, "pct_cuda_time": 1.1790946246584615, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[8, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 105.36, "cuda_time_us": 9.088, "pct_cuda_time": 0.1329364937151376, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.088, "pct_cuda_time": 0.1329364937151376, "trace": "_C::silu_and_mul(bfloat16[8, 14336], bfloat16[8, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 170.064, "cuda_time_us": 44.352, "pct_cuda_time": 0.6487675362295096, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 
44.352, "pct_cuda_time": 0.6487675362295096, "trace": "mm(bfloat16[8, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[8, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[8, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.907, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04400010707472865, "trace": "_C::fused_add_rms_norm(bfloat16[8, 4096], bfloat16[8, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 534.195, "cuda_time_us": 349.083, "pct_cuda_time": 5.106279713420047, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 5.6, "pct_cuda_time": 0.08191509295827142, "trace": "index_select(bfloat16[8, 4096], 0, int64[8])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010765983645944244, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[8, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 342.747, "pct_cuda_time": 5.013598636815832, "trace": "mm(bfloat16[8, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[8, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[8, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 3857.672, "cuda_time_us": 119.422, "pct_cuda_time": 1.7468686127254802, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010765983645944244, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010765983645944244, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.011219442196248961, "trace": "copy_(int32[8], int32[8], True) <- _to_copy(int32[8], 3, 0, None, None, True, None) <- to(int32[8], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.011234069891420081, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, 
"pct_cuda_time": 0.011234069891420081, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.011702156136895918, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.011702156136895918, "trace": "copy_(bfloat16[8], bfloat16[8], True) <- _to_copy(bfloat16[8], 15, 0, None, None, True, None) <- to(bfloat16[8], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 4.833, "pct_cuda_time": 0.07069565076202246, "trace": "copy_(float32[8, 128256], bfloat16[8, 128256], False) <- _to_copy(bfloat16[8, 128256], 6, None, None, None, False, None) <- to(bfloat16[8, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 6.4, "pct_cuda_time": 0.09361724909516735, "trace": "div_(float32[8, 128256], bfloat16[8, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 34.976, "pct_cuda_time": 0.5116182663050894, "trace": "_softmax(float32[8, 128256], -1, False) <- softmax(float32[8, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 28.351, "pct_cuda_time": 0.4147097857964202, "trace": "_log_softmax(float32[8, 128256], -1, False) <- log_softmax(float32[8, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 2.016, "pct_cuda_time": 
0.02948943346497771, "trace": "copy_(int64[8], int32[8], False) <- _to_copy(int32[8], 4, None, None, None, False, None) <- to(int32[8], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 7.392, "pct_cuda_time": 0.10812792270491828, "trace": "index(float32[8, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 27.615, "pct_cuda_time": 0.40394380215047593, "trace": "argmax(float32[8, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.036042640901639424, "trace": "copy_(int64[8], int64[8], False) <- _to_copy(int64[8], 4, 0, None, None, False, None) <- to(int64[8], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] } }
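
The dump above is a nested tree of {"entry": ..., "children": [...]} records, where each entry carries "name", "cpu_time_us", "cuda_time_us", "pct_cuda_time", and "trace" fields, and a parent's CUDA time already includes that of its children (e.g. the RMSNorm node's 3.008 us equals its single child's). Below is a minimal Python sketch, under stated assumptions, for pulling the most expensive leaf kernels out of such a file: "profile.json" is a placeholder path, and the tree-walk assumes nothing about the top-level layout beyond the node shape visible above.

import json

def iter_entry_nodes(obj):
    """Depth-first walk over any JSON value, yielding nodes shaped like
    {"entry": {...}, "children": [...]} as they appear in the dump above."""
    if isinstance(obj, dict):
        if "entry" in obj and isinstance(obj.get("children"), list):
            yield obj
        for value in obj.values():
            yield from iter_entry_nodes(value)
    elif isinstance(obj, list):
        for item in obj:
            yield from iter_entry_nodes(item)

def top_leaf_kernels(profile, n=10):
    """Rank leaf entries (nodes without children) by CUDA time.
    Only leaves are counted so parent totals are not double-counted."""
    leaves = [node["entry"] for node in iter_entry_nodes(profile)
              if not node["children"]]
    leaves.sort(key=lambda e: e.get("cuda_time_us", 0.0), reverse=True)
    return leaves[:n]

if __name__ == "__main__":
    # "profile.json" is an assumed filename for the dump shown in this document.
    with open("profile.json") as f:
        profile = json.load(f)
    for e in top_leaf_kernels(profile):
        print(f'{e.get("cuda_time_us", 0.0):9.3f} us  '
              f'{e.get("pct_cuda_time", 0.0):6.3f} %  {e.get("name", "")[:80]}')

Run against a dump with this shape, the script prints the top leaf kernels by "cuda_time_us" (here that would surface entries such as the sm90 cuBLAS GEMM at ~342.7 us and the softmax/argmax kernels in the Sampler block).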