{ "context": { "python_version": "3.12.9 | packaged by Anaconda, Inc. | (main, Feb 6 2025, 18:56:27) [GCC 11.2.0]", "torch_version": "2.5.1+cu124", "engine_args": { "model": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "served_model_name": null, "tokenizer": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "task": "auto", "skip_tokenizer_init": false, "tokenizer_mode": "auto", "trust_remote_code": false, "allowed_local_media_path": null, "download_dir": null, "load_format": "dummy", "config_format": "auto", "dtype": "auto", "kv_cache_dtype": "auto", "seed": 0, "max_model_len": null, "distributed_executor_backend": null, "pipeline_parallel_size": 1, "tensor_parallel_size": 1, "max_parallel_loading_workers": null, "block_size": null, "enable_prefix_caching": false, "disable_sliding_window": false, "use_v2_block_manager": true, "swap_space": 4, "cpu_offload_gb": 0, "gpu_memory_utilization": 0.9, "max_num_batched_tokens": 8000, "max_num_partial_prefills": 1, "max_long_partial_prefills": 1, "long_prefill_token_threshold": 0, "max_num_seqs": 256, "max_logprobs": 20, "disable_log_stats": false, "revision": null, "code_revision": null, "rope_scaling": null, "rope_theta": null, "hf_overrides": null, "tokenizer_revision": null, "quantization": null, "enforce_eager": true, "max_seq_len_to_capture": 8192, "disable_custom_all_reduce": false, "tokenizer_pool_size": 0, "tokenizer_pool_type": "ray", "tokenizer_pool_extra_config": null, "limit_mm_per_prompt": null, "mm_processor_kwargs": null, "disable_mm_preprocessor_cache": false, "enable_lora": false, "enable_lora_bias": false, "max_loras": 1, "max_lora_rank": 16, "enable_prompt_adapter": false, "max_prompt_adapters": 1, "max_prompt_adapter_token": 0, "fully_sharded_loras": false, "lora_extra_vocab_size": 256, "long_lora_scaling_factors": null, "lora_dtype": "auto", "max_cpu_loras": null, "device": "auto", "num_scheduler_steps": 1, "multi_step_stream_outputs": true, "ray_workers_use_nsight": false, "num_gpu_blocks_override": null, "num_lookahead_slots": 0, "model_loader_extra_config": null, "ignore_patterns": [], "preemption_mode": null, "scheduler_delay_factor": 0.0, "enable_chunked_prefill": null, "guided_decoding_backend": "xgrammar", "logits_processor_pattern": null, "speculative_model": null, "speculative_model_quantization": null, "speculative_draft_tensor_parallel_size": null, "num_speculative_tokens": null, "speculative_disable_mqa_scorer": false, "speculative_max_model_len": null, "speculative_disable_by_batch_size": null, "ngram_prompt_lookup_max": null, "ngram_prompt_lookup_min": null, "spec_decoding_acceptance_method": "rejection_sampler", "typical_acceptance_sampler_posterior_threshold": null, "typical_acceptance_sampler_posterior_alpha": null, "qlora_adapter_name_or_path": null, "disable_logprobs_during_spec_decoding": null, "otlp_traces_endpoint": null, "collect_detailed_traces": null, "disable_async_output_proc": false, "scheduling_policy": "fcfs", "scheduler_cls": "vllm.core.scheduler.Scheduler", "override_neuron_config": null, "override_pooler_config": null, "compilation_config": null, "worker_cls": "auto", "kv_transfer_config": null, "generation_config": null, "override_generation_config": null, "enable_sleep_mode": false, "model_impl": "auto", "calculate_kv_scales": false, "additional_config": null }, "prompt_len": 0, "batch_size": 12, "num_steps": 2, "complete_num_requests_per_step": null, "save_chrome_traces_folder": null }, "prefill": { "metadata": { "num_running_seqs": null }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", 
"cuda_time_us": 33834.56799999999, "pct_cuda_time": 98.56911706084959, "invocations": 1 }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 43.999, "pct_cuda_time": 0.12818081736880232, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cuda_time_us": 43.999, "pct_cuda_time": 0.12818081736880232, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 33776.009999999995, "pct_cuda_time": 98.39852199497352, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 965.1080000000003, "pct_cuda_time": 2.8116169069562966, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 19.615, "pct_cuda_time": 0.05714372446394366, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 945.4930000000003, "pct_cuda_time": 2.7544731824923527, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 7646.6230000000005, "pct_cuda_time": 22.276651429602566, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 3433.4869999999996, "pct_cuda_time": 10.002663017003952, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 23.90200000000001, "pct_cuda_time": 0.06963289840107989, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 3409.5849999999996, "pct_cuda_time": 9.933030118602872, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 623.357, "pct_cuda_time": 1.8160051313112686, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 623.357, "pct_cuda_time": 1.8160051313112686, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 964.47, "pct_cuda_time": 2.8097582428620824, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 244.028, "pct_cuda_time": 0.7109186231703922, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 
256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 675.4809999999999, "pct_cuda_time": 1.9678562398485404, "invocations": 32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 44.961000000000006, "pct_cuda_time": 0.1309833798431492, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 2625.3090000000007, "pct_cuda_time": 7.648225038425262, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 24.894000000000005, "pct_cuda_time": 0.07252285887358725, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 2600.4150000000004, "pct_cuda_time": 7.575702179551675, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 25164.27899999999, "pct_cuda_time": 73.31025365841465, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 15461.015999999998, "pct_cuda_time": 45.04206159758472, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 23.681000000000008, "pct_cuda_time": 0.06898906648129749, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 15437.334999999997, "pct_cuda_time": 44.97307253110342, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 2147.4289999999996, "pct_cuda_time": 6.256033193060517, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 2147.4289999999996, "pct_cuda_time": 6.256033193060517, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 7555.834, "pct_cuda_time": 22.012158867769426, "invocations": 32 }, "children": [ { "entry": { "name": "Memset (Device)", "cuda_time_us": 25.951000000000015, "pct_cuda_time": 0.07560218167544241, "invocations": 32 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cuda_time_us": 7529.882999999999, "pct_cuda_time": 21.93655668609398, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 14.559, "pct_cuda_time": 0.04241424850729318, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, 
int)", "cuda_time_us": 14.559, "pct_cuda_time": 0.04241424850729318, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 363.9, "pct_cuda_time": 1.0601377176869282, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 7.233, "pct_cuda_time": 0.021071657356497808, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "invocations": 1 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 355.931, "pct_cuda_time": 1.0369218961088926, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 127.26100000000001, "pct_cuda_time": 0.37074522146346856, "invocations": 1 }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 5.3759999999999994, "pct_cuda_time": 0.015661721270362534, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 7.008, "pct_cuda_time": 0.02041617237029402, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 8.544, "pct_cuda_time": 0.02489094987611189, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 35.743, "pct_cuda_time": 0.10412888827503128, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 28.831, "pct_cuda_time": 0.08399238949885086, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 2.048, "pct_cuda_time": 0.005966370007757156, "invocations": 1 }, "children": [] 
}, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cuda_time_us": 9.184, "pct_cuda_time": 0.026755440503535993, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 27.744, "pct_cuda_time": 0.08082566869883523, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 2.783, "pct_cuda_time": 0.008107620962689535, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 83684.435, "cuda_time_us": 33834.56799999999, "pct_cuda_time": 98.56911706084959, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 328.111, "cuda_time_us": 43.999, "pct_cuda_time": 0.12818081736880232, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectLargeIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 43.999, "pct_cuda_time": 0.12818081736880232, "trace": "index_select(bfloat16[128256, 4096], 0, int64[1536]) <- embedding(bfloat16[128256, 4096], int64[1536], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 4162.671, "cuda_time_us": 1065.1370000000002, "pct_cuda_time": 3.103028052222869, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 230.052, "cuda_time_us": 19.615, "pct_cuda_time": 0.05714372446394366, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.615, "pct_cuda_time": 0.05714372446394366, "trace": "_C::rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 3120.042, "cuda_time_us": 244.861, "pct_cuda_time": 0.713345374252649, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 436.677, "cuda_time_us": 111.742, "pct_cuda_time": 0.3255342370150392, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 111.006, "pct_cuda_time": 0.3233900727935014, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 1029.931, "cuda_time_us": 19.52, "pct_cuda_time": 0.0568669641364354, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.52, "pct_cuda_time": 0.0568669641364354, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1074.83, "cuda_time_us": 30.08, "pct_cuda_time": 0.08763105948893322, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.616, "pct_cuda_time": 0.022187438466346923, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.151, "pct_cuda_time": 0.06161850196976153, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.313, "pct_cuda_time": 
0.0038251190528247785, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 333.033, "cuda_time_us": 83.519, "pct_cuda_time": 0.24331311361224117, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.783, "pct_cuda_time": 0.24116894939070344, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 118.836, "cuda_time_us": 15.519, "pct_cuda_time": 0.04521098444842935, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.519, "pct_cuda_time": 0.04521098444842935, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 574.447, "cuda_time_us": 785.142, "pct_cuda_time": 2.2873279690578467, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 191.43, "cuda_time_us": 482.554, "pct_cuda_time": 1.4058084534781479, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 481.818, "pct_cuda_time": 1.40366428925661, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 133.913, "cuda_time_us": 67.455, "pct_cuda_time": 0.19651439886389596, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.455, "pct_cuda_time": 0.19651439886389596, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], 
bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 177.701, "cuda_time_us": 235.13299999999998, "pct_cuda_time": 0.6850051167158023, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.832, "pct_cuda_time": 0.0024238378156513445, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 234.301, "pct_cuda_time": 0.6825812789001511, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2626.917, "cuda_time_us": 1048.625, "pct_cuda_time": 3.0549241940353262, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.468, "cuda_time_us": 14.56, "pct_cuda_time": 0.04241716177389853, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.56, "pct_cuda_time": 0.04241716177389853, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1913.217, "cuda_time_us": 236.92300000000003, "pct_cuda_time": 0.6902198639393794, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 162.606, "cuda_time_us": 107.006, "pct_cuda_time": 0.3117370063721007, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.27, "pct_cuda_time": 0.309592842150563, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 566.641, "cuda_time_us": 19.328, "pct_cuda_time": 0.05630761694820816, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.328, "pct_cuda_time": 0.05630761694820816, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 807.481, "cuda_time_us": 30.367000000000004, "pct_cuda_time": 0.08846716700466875, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.711, "pct_cuda_time": 0.022464198793855195, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.12, "pct_cuda_time": 0.06152819070499568, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.536, "pct_cuda_time": 0.004474777505817867, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 198.666, "cuda_time_us": 80.22200000000001, "pct_cuda_time": 0.23370807361440168, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 79.486, "pct_cuda_time": 0.23156390939286395, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.697, "cuda_time_us": 15.136, "pct_cuda_time": 0.04409520333858023, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.136, "pct_cuda_time": 0.04409520333858023, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 470.78, "cuda_time_us": 782.006, "pct_cuda_time": 2.278191964983468, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 165.919, "cuda_time_us": 480.154, "pct_cuda_time": 1.3988166136253075, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 479.418, "pct_cuda_time": 1.3966724494037697, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.747, "cuda_time_us": 66.943, "pct_cuda_time": 0.19502280636195668, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 66.943, "pct_cuda_time": 0.19502280636195668, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.848, "cuda_time_us": 234.909, "pct_cuda_time": 0.684352544996204, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.832, "pct_cuda_time": 0.0024238378156513445, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 234.077, "pct_cuda_time": 0.6819287071805527, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2635.803, "cuda_time_us": 1054.192, 
"pct_cuda_time": 3.0711423492273107, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.571, "cuda_time_us": 14.367, "pct_cuda_time": 0.041854901319065954, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.367, "pct_cuda_time": 0.041854901319065954, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1876.371, "cuda_time_us": 236.89100000000002, "pct_cuda_time": 0.6901266394080081, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 154.048, "cuda_time_us": 106.97500000000001, "pct_cuda_time": 0.3116466951073349, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.239, "pct_cuda_time": 0.30950253088579716, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 566.104, "cuda_time_us": 19.647, "pct_cuda_time": 0.05723694899531487, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.647, "pct_cuda_time": 0.05723694899531487, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 778.949, "cuda_time_us": 30.078, "pct_cuda_time": 0.08762523295572254, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.552, "pct_cuda_time": 0.022000989403604513, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.055, "pct_cuda_time": 0.061338828375647914, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.471, "pct_cuda_time": 0.004285415176470106, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 221.477, "cuda_time_us": 80.191, "pct_cuda_time": 0.23361776234963583, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 79.455, "pct_cuda_time": 0.23147359812809806, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 91.003, "cuda_time_us": 15.072, "pct_cuda_time": 0.04390875427583782, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.072, "pct_cuda_time": 0.04390875427583782, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 500.376, "cuda_time_us": 787.862, "pct_cuda_time": 2.295252054224399, "trace": "" }, "children": [ { "entry": { "name": 
"MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 182.771, "cuda_time_us": 483.83299999999997, "pct_cuda_time": 1.4095345214663908, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 483.097, "pct_cuda_time": 1.407390357244853, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.689, "cuda_time_us": 67.424, "pct_cuda_time": 0.19642408759913016, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.424, "pct_cuda_time": 0.19642408759913016, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 157.501, "cuda_time_us": 236.605, "pct_cuda_time": 0.6892934451588779, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.832, "pct_cuda_time": 0.0024238378156513445, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 235.773, "pct_cuda_time": 0.6868696073432266, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2478.023, "cuda_time_us": 1055.41, "pct_cuda_time": 3.0746907079526276, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.866, "cuda_time_us": 14.368, "pct_cuda_time": 0.0418578145856713, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.368, "pct_cuda_time": 0.0418578145856713, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1792.65, "cuda_time_us": 240.668, "pct_cuda_time": 0.7011300473764157, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 156.119, "cuda_time_us": 107.775, "pct_cuda_time": 0.31397730839161503, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], 
bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 107.039, "pct_cuda_time": 0.3118331441700773, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 511.831, "cuda_time_us": 19.84, "pct_cuda_time": 0.05779920945014745, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.84, "pct_cuda_time": 0.05779920945014745, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 737.381, "cuda_time_us": 30.047, "pct_cuda_time": 0.08753492169095668, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.68, "pct_cuda_time": 0.022373887529089337, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.055, "pct_cuda_time": 0.061338828375647914, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void 
at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0038222057862194285, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 234.635, "cuda_time_us": 83.006, "pct_cuda_time": 0.24181860784369655, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.27, "pct_cuda_time": 0.2396744436221588, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.765, "cuda_time_us": 15.808, "pct_cuda_time": 0.04605291849737555, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.808, "pct_cuda_time": 0.04605291849737555, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 455.932, "cuda_time_us": 784.566, "pct_cuda_time": 2.2856499274931648, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.216, "cuda_time_us": 481.786, "pct_cuda_time": 1.4035710647252388, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 481.05, "pct_cuda_time": 1.4014269005037012, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.157, "cuda_time_us": 67.327, "pct_cuda_time": 0.1961415007384112, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 
const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.327, "pct_cuda_time": 0.1961415007384112, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.195, "cuda_time_us": 235.453, "pct_cuda_time": 0.6859373620295145, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.832, "pct_cuda_time": 0.0024238378156513445, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 234.621, "pct_cuda_time": 0.6835135242138632, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2416.184, "cuda_time_us": 1051.9560000000001, "pct_cuda_time": 3.064628285097748, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.036, "cuda_time_us": 15.008, "pct_cuda_time": 0.04372230521309541, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.008, "pct_cuda_time": 0.04372230521309541, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1695.681, "cuda_time_us": 238.399, "pct_cuda_time": 0.6945198454488761, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 148.343, "cuda_time_us": 107.42200000000001, "pct_cuda_time": 0.3129489252799264, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.686, "pct_cuda_time": 0.31080476105838867, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 479.894, "cuda_time_us": 19.168, "pct_cuda_time": 0.055841494291352135, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.168, "pct_cuda_time": 0.055841494291352135, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { 
"entry": { "name": "Attention", "cpu_time_us": 744.814, "cuda_time_us": 29.985, "pct_cuda_time": 0.08735429916142497, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.52, "pct_cuda_time": 0.021907764872233306, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.152, "pct_cuda_time": 0.061621415236366876, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.313, "pct_cuda_time": 0.0038251190528247785, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 182.436, "cuda_time_us": 81.824, "pct_cuda_time": 0.23837512671617261, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], 
bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.056, "pct_cuda_time": 0.2361377379632637, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.289, "cuda_time_us": 15.552, "pct_cuda_time": 0.0453071222464059, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.552, "pct_cuda_time": 0.0453071222464059, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 491.892, "cuda_time_us": 782.9970000000001, "pct_cuda_time": 2.2810790121893705, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 156.995, "cuda_time_us": 480.954, "pct_cuda_time": 1.4011472269095877, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 480.218, "pct_cuda_time": 1.3990030626880499, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.77, "cuda_time_us": 66.463, "pct_cuda_time": 0.1936244383913886, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 66.463, "pct_cuda_time": 0.1936244383913886, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 158.336, "cuda_time_us": 235.58, "pct_cuda_time": 0.686307346888394, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.799, "pct_cuda_time": 0.0023277000176747894, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 234.781, "pct_cuda_time": 0.6839796468707192, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, 
"children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2834.985, "cuda_time_us": 1052.2089999999998, "pct_cuda_time": 3.0653653415489006, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.729, "cuda_time_us": 14.688, "pct_cuda_time": 0.04279005989938336, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.688, "pct_cuda_time": 0.04279005989938336, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2113.744, "cuda_time_us": 236.413, "pct_cuda_time": 0.6887340979706508, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.272, "cuda_time_us": 107.16600000000001, "pct_cuda_time": 0.3122031290289568, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.43, "pct_cuda_time": 0.310058964807419, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 754.139, "cuda_time_us": 19.232, "pct_cuda_time": 0.05602794335409454, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.232, "pct_cuda_time": 0.05602794335409454, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 808.684, "cuda_time_us": 30.464000000000002, "pct_cuda_time": 0.0887497538653877, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.52, "pct_cuda_time": 0.021907764872233306, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, 
cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.44, "pct_cuda_time": 0.06246043601870774, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.004381552974446662, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 209.516, "cuda_time_us": 79.551, "pct_cuda_time": 0.2317532717222117, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 78.815, "pct_cuda_time": 0.22960910750067395, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.239, "cuda_time_us": 14.88, "pct_cuda_time": 0.04334940708761059, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.88, "pct_cuda_time": 0.04334940708761059, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", 
"cpu_time_us": 471.551, "cuda_time_us": 786.228, "pct_cuda_time": 2.2904917765912565, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 170.719, "cuda_time_us": 483.993, "pct_cuda_time": 1.4100006441232467, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 483.257, "pct_cuda_time": 1.407856479901709, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.779, "cuda_time_us": 66.591, "pct_cuda_time": 0.19399733651687343, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 66.591, "pct_cuda_time": 0.19399733651687343, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 151.334, "cuda_time_us": 235.644, "pct_cuda_time": 0.6864937959511365, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 234.844, "pct_cuda_time": 0.6841631826668563, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2399.425, "cuda_time_us": 1053.652, "pct_cuda_time": 3.0695691852604217, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.754, "cuda_time_us": 14.56, "pct_cuda_time": 0.04241716177389853, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.56, "pct_cuda_time": 0.04241716177389853, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1717.133, "cuda_time_us": 236.99, "pct_cuda_time": 0.6904150528019378, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.75, "cuda_time_us": 107.199, "pct_cuda_time": 0.31229926682693326, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", 
"cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.463, "pct_cuda_time": 0.3101551026053956, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 512.157, "cuda_time_us": 19.392, "pct_cuda_time": 0.05649406601095057, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.392, "pct_cuda_time": 0.05649406601095057, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 734.419, "cuda_time_us": 30.048000000000002, "pct_cuda_time": 0.08753783495756202, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.616, "pct_cuda_time": 0.022187438466346923, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 20.832, "pct_cuda_time": 0.06068916992265482, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], 
bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.6, "pct_cuda_time": 0.004661226568560279, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 182.293, "cuda_time_us": 80.351, "pct_cuda_time": 0.23408388500649183, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 79.615, "pct_cuda_time": 0.2319397207849541, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 92.292, "cuda_time_us": 15.616, "pct_cuda_time": 0.045493571309148315, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.616, "pct_cuda_time": 0.045493571309148315, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 454.484, "cuda_time_us": 786.486, "pct_cuda_time": 2.291243399375437, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 153.713, "cuda_time_us": 483.258, "pct_cuda_time": 1.4078593931683143, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 482.522, "pct_cuda_time": 1.4057152289467765, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.843, "cuda_time_us": 67.232, "pct_cuda_time": 
0.19586474041090288, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.232, "pct_cuda_time": 0.19586474041090288, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.754, "cuda_time_us": 235.996, "pct_cuda_time": 0.6875192657962197, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 235.196, "pct_cuda_time": 0.6851886525119396, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2478.503, "cuda_time_us": 1056.115, "pct_cuda_time": 3.076744560909399, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.27, "cuda_time_us": 15.007, "pct_cuda_time": 0.043719391946490056, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.007, "pct_cuda_time": 0.043719391946490056, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1751.626, "cuda_time_us": 239.582, "pct_cuda_time": 0.6979662398430054, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 145.327, "cuda_time_us": 106.783, "pct_cuda_time": 0.31108734791910764, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.015, "pct_cuda_time": 0.3088499591661987, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 513.889, "cuda_time_us": 19.04, "pct_cuda_time": 0.055468596165867315, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.04, "pct_cuda_time": 0.055468596165867315, "trace": "_C::rotary_embedding(int64[1536], 
bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 739.19, "cuda_time_us": 30.048000000000002, "pct_cuda_time": 0.08753783495756202, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.616, "pct_cuda_time": 0.022187438466346923, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.184, "pct_cuda_time": 0.06171463976773809, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.248, "pct_cuda_time": 0.0036357567234770174, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 207.424, "cuda_time_us": 83.711, "pct_cuda_time": 0.2438724608004684, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, 
"pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.975, "pct_cuda_time": 0.2417282965789307, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.585, "cuda_time_us": 15.648, "pct_cuda_time": 0.04558679584051952, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.648, "pct_cuda_time": 0.04558679584051952, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 492.829, "cuda_time_us": 785.878, "pct_cuda_time": 2.289472133279384, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 180.25, "cuda_time_us": 481.978, "pct_cuda_time": 1.404130411913466, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 481.242, "pct_cuda_time": 1.4019862476919285, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.866, "cuda_time_us": 67.647, "pct_cuda_time": 0.19707374605212324, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.647, "pct_cuda_time": 0.19707374605212324, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 156.41, "cuda_time_us": 236.25300000000001, "pct_cuda_time": 0.6882679753137947, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 235.453, "pct_cuda_time": 0.6859373620295145, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 
4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2541.517, "cuda_time_us": 1053.297, "pct_cuda_time": 3.0685349756155222, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.993, "cuda_time_us": 14.4, "pct_cuda_time": 0.0419510391170425, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.4, "pct_cuda_time": 0.0419510391170425, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1759.337, "cuda_time_us": 237.692, "pct_cuda_time": 0.6924601659588936, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 166.408, "cuda_time_us": 106.783, "pct_cuda_time": 0.31108734791910764, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.047, "pct_cuda_time": 0.3089431836975699, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 498.468, "cuda_time_us": 19.2, "pct_cuda_time": 0.055934718822723335, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.2, "pct_cuda_time": 0.055934718822723335, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 756.463, "cuda_time_us": 29.727, "pct_cuda_time": 0.08660267637724463, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.456, "pct_cuda_time": 0.0217213158094909, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, 
true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 20.927, "pct_cuda_time": 0.060965930250163086, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0039154303175906345, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 190.862, "cuda_time_us": 81.982, "pct_cuda_time": 0.23883542283981798, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.002141250954932378, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.247, "pct_cuda_time": 0.2366941718848856, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.569, "cuda_time_us": 15.136, "pct_cuda_time": 0.04409520333858023, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.136, "pct_cuda_time": 0.04409520333858023, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], 
bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 558.516, "cuda_time_us": 786.069, "pct_cuda_time": 2.2900285672010057, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.249, "cuda_time_us": 483.32099999999997, "pct_cuda_time": 1.4080429289644514, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.767, "pct_cuda_time": 0.002234475486303584, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 482.554, "pct_cuda_time": 1.4058084534781479, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.245, "cuda_time_us": 67.167, "pct_cuda_time": 0.19567537808155513, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.167, "pct_cuda_time": 0.19567537808155513, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 252.646, "cuda_time_us": 235.58100000000002, "pct_cuda_time": 0.6863102601549994, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 234.781, "pct_cuda_time": 0.6839796468707192, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2483.847, "cuda_time_us": 1054.896, "pct_cuda_time": 3.0731932889174773, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.838, "cuda_time_us": 14.784, "pct_cuda_time": 0.04306973349349697, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.784, "pct_cuda_time": 0.04306973349349697, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1761.451, "cuda_time_us": 239.642, "pct_cuda_time": 0.6981410358393264, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 154.659, "cuda_time_us": 107.261, 
"pct_cuda_time": 0.312479889356465, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.002141250954932378, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.526, "pct_cuda_time": 0.31033863840153264, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 527.009, "cuda_time_us": 19.52, "pct_cuda_time": 0.0568669641364354, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.52, "pct_cuda_time": 0.0568669641364354, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 742.031, "cuda_time_us": 30.367, "pct_cuda_time": 0.08846716700466874, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.776, "pct_cuda_time": 0.02265356112320295, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.087, "pct_cuda_time": 0.06143205290701912, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.004381552974446662, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 187.135, "cuda_time_us": 82.494, "pct_cuda_time": 0.24032701534175724, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.002141250954932378, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.759, "pct_cuda_time": 0.2381857643868249, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 95.237, "cuda_time_us": 15.615, "pct_cuda_time": 0.045490658042542965, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.615, "pct_cuda_time": 0.045490658042542965, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 484.342, "cuda_time_us": 784.8549999999999, "pct_cuda_time": 2.2864918615421104, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.519, "cuda_time_us": 482.45799999999997, "pct_cuda_time": 1.4055287798840344, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 481.69, "pct_cuda_time": 1.4032913911311253, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { 
"entry": { "name": "SiluAndMul", "cpu_time_us": 99.348, "cuda_time_us": 66.752, "pct_cuda_time": 0.1944663724403348, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 66.752, "pct_cuda_time": 0.1944663724403348, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 175.067, "cuda_time_us": 235.645, "pct_cuda_time": 0.6864967092177419, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 234.845, "pct_cuda_time": 0.6841660959334617, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2389.865, "cuda_time_us": 1049.843, "pct_cuda_time": 3.058472552760643, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.894, "cuda_time_us": 14.624, "pct_cuda_time": 0.042603610836640944, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.624, "pct_cuda_time": 0.042603610836640944, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1719.155, "cuda_time_us": 235.58100000000002, "pct_cuda_time": 0.6863102601549994, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 158.969, "cuda_time_us": 106.941, "pct_cuda_time": 0.311547644042753, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.002141250954932378, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.206, "pct_cuda_time": 0.3094063930878206, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 493.421, "cuda_time_us": 19.297, "pct_cuda_time": 0.05621730568344231, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, 
"cuda_time_us": 19.297, "pct_cuda_time": 0.05621730568344231, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 749.37, "cuda_time_us": 29.92, "pct_cuda_time": 0.08716493683207721, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.424, "pct_cuda_time": 0.021628091278119693, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.024, "pct_cuda_time": 0.06124851711088206, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.004288328443075456, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 181.199, "cuda_time_us": 79.423, "pct_cuda_time": 0.23138037359672686, "trace": "" }, "children": [ 
{ "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 78.687, "pct_cuda_time": 0.22923620937518913, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.3, "cuda_time_us": 15.679, "pct_cuda_time": 0.04567710710528538, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.679, "pct_cuda_time": 0.04567710710528538, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 452.604, "cuda_time_us": 783.9590000000001, "pct_cuda_time": 2.2838815746637176, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 152.624, "cuda_time_us": 481.46700000000004, "pct_cuda_time": 1.4026417326781324, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0021470774881430783, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 480.73, "pct_cuda_time": 1.4004946551899893, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.722, "cuda_time_us": 67.103, "pct_cuda_time": 0.1954889290188127, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.103, "pct_cuda_time": 0.1954889290188127, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.744, "cuda_time_us": 235.389, "pct_cuda_time": 0.6857509129667722, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 234.589, 
"pct_cuda_time": 0.683420299682492, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2411.619, "cuda_time_us": 1056.34, "pct_cuda_time": 3.0774000458956023, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.243, "cuda_time_us": 14.688, "pct_cuda_time": 0.04279005989938336, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.688, "pct_cuda_time": 0.04279005989938336, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1752.291, "cuda_time_us": 239.966, "pct_cuda_time": 0.6990849342194598, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 146.008, "cuda_time_us": 106.97500000000001, "pct_cuda_time": 0.3116466951073349, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.239, "pct_cuda_time": 0.30950253088579716, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 468.663, "cuda_time_us": 19.232, "pct_cuda_time": 0.05602794335409454, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.232, "pct_cuda_time": 0.05602794335409454, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 721.762, "cuda_time_us": 30.048000000000002, "pct_cuda_time": 0.08753783495756202, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.712, "pct_cuda_time": 0.022467112060460544, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> 
>, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 20.992, "pct_cuda_time": 0.061155292579510856, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.344, "pct_cuda_time": 0.0039154303175906345, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 189.521, "cuda_time_us": 83.711, "pct_cuda_time": 0.2438724608004684, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.975, "pct_cuda_time": 0.2417282965789307, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.979, "cuda_time_us": 15.552, "pct_cuda_time": 0.0453071222464059, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.552, 
"pct_cuda_time": 0.0453071222464059, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 443.311, "cuda_time_us": 786.1339999999999, "pct_cuda_time": 2.2902179295303533, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 153.269, "cuda_time_us": 482.554, "pct_cuda_time": 1.4058084534781479, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 481.818, "pct_cuda_time": 1.40366428925661, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.762, "cuda_time_us": 67.103, "pct_cuda_time": 0.1954889290188127, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.103, "pct_cuda_time": 0.1954889290188127, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.403, "cuda_time_us": 236.477, "pct_cuda_time": 0.6889205470333931, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 235.677, "pct_cuda_time": 0.6865899337491129, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2644.842, "cuda_time_us": 1050.451, "pct_cuda_time": 3.0602438188566956, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 65.421, "cuda_time_us": 14.336, "pct_cuda_time": 0.041764590054300096, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.336, "pct_cuda_time": 0.041764590054300096, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1944.342, "cuda_time_us": 237.34199999999998, "pct_cuda_time": 0.6914405226470209, "trace": "" }, "children": [ { "entry": { 
"name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.851, "cuda_time_us": 107.007, "pct_cuda_time": 0.3117399196387061, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.271, "pct_cuda_time": 0.30959575541716833, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 483.885, "cuda_time_us": 19.584, "pct_cuda_time": 0.05705341319917781, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.584, "pct_cuda_time": 0.05705341319917781, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 965.253, "cuda_time_us": 29.76, "pct_cuda_time": 0.08669881417522118, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.488, "pct_cuda_time": 0.021814540340862103, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 20.96, "pct_cuda_time": 0.06106206804813965, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, 
None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0038222057862194285, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 216.155, "cuda_time_us": 80.991, "pct_cuda_time": 0.23594837563391596, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.255, "pct_cuda_time": 0.23380421141237817, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 91.085, "cuda_time_us": 15.327, "pct_cuda_time": 0.04465163726020212, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.327, "pct_cuda_time": 0.04465163726020212, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 472.021, "cuda_time_us": 783.446, "pct_cuda_time": 2.2823870688951726, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 161.825, "cuda_time_us": 481.04999999999995, "pct_cuda_time": 1.401426900503701, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 480.282, "pct_cuda_time": 1.3991895117507922, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 
28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.572, "cuda_time_us": 67.263, "pct_cuda_time": 0.1959550516756688, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.263, "pct_cuda_time": 0.1959550516756688, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 160.721, "cuda_time_us": 235.133, "pct_cuda_time": 0.6850051167158024, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 234.333, "pct_cuda_time": 0.6826745034315223, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2496.697, "cuda_time_us": 1055.346, "pct_cuda_time": 3.0745042588898848, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.171, "cuda_time_us": 14.559, "pct_cuda_time": 0.04241424850729318, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.559, "pct_cuda_time": 0.04241424850729318, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1791.467, "cuda_time_us": 240.03, "pct_cuda_time": 0.6992713832822023, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 173.45, "cuda_time_us": 106.559, "pct_cuda_time": 0.3104347761995092, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 105.823, "pct_cuda_time": 0.3082906119779715, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 550.63, "cuda_time_us": 19.872, "pct_cuda_time": 0.05789243398151866, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, 
c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.872, "pct_cuda_time": 0.05789243398151866, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 733.565, "cuda_time_us": 30.240000000000002, "pct_cuda_time": 0.08809718214578927, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.616, "pct_cuda_time": 0.022187438466346923, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.12, "pct_cuda_time": 0.06152819070499568, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.004381552974446662, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 186.682, 
"cuda_time_us": 83.35900000000001, "pct_cuda_time": 0.2428469909553852, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.623, "pct_cuda_time": 0.24070282673384746, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.314, "cuda_time_us": 15.616, "pct_cuda_time": 0.045493571309148315, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.616, "pct_cuda_time": 0.045493571309148315, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 483.89, "cuda_time_us": 785.1410000000001, "pct_cuda_time": 2.287325055791241, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 186.646, "cuda_time_us": 481.529, "pct_cuda_time": 1.402822355207664, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 480.793, "pct_cuda_time": 1.4006781909861261, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.208, "cuda_time_us": 67.455, "pct_cuda_time": 0.19651439886389596, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.455, "pct_cuda_time": 0.19651439886389596, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.559, "cuda_time_us": 236.15699999999998, "pct_cuda_time": 0.687988301719681, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.832, "pct_cuda_time": 0.0024238378156513445, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 235.325, "pct_cuda_time": 0.6855644639040297, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2407.003, "cuda_time_us": 1053.776, "pct_cuda_time": 3.069930430319485, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.123, "cuda_time_us": 14.432, "pct_cuda_time": 0.04204426364841371, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.432, "pct_cuda_time": 0.04204426364841371, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1743.931, "cuda_time_us": 238.45999999999998, "pct_cuda_time": 0.6946975547118025, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.67, "cuda_time_us": 106.846, "pct_cuda_time": 0.3112708837152447, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.11, "pct_cuda_time": 0.30912671949370696, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 494.719, "cuda_time_us": 20.159, "pct_cuda_time": 0.05872854149725416, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.159, "pct_cuda_time": 0.05872854149725416, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 724.351, "cuda_time_us": 30.208000000000002, "pct_cuda_time": 0.08800395761441807, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.744, "pct_cuda_time": 0.022560336591831747, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], 
bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 20.768, "pct_cuda_time": 0.060502720859912415, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.696, "pct_cuda_time": 0.004940900162673895, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 203.18, "cuda_time_us": 81.247, "pct_cuda_time": 0.2366941718848856, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.479, "pct_cuda_time": 0.23445678313197665, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.663, "cuda_time_us": 15.744, "pct_cuda_time": 0.045866469434633135, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.744, "pct_cuda_time": 0.045866469434633135, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 444.012, "cuda_time_us": 785.1400000000001, "pct_cuda_time": 2.2873221425246357, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 154.213, "cuda_time_us": 482.105, "pct_cuda_time": 1.4045003967723457, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 481.369, "pct_cuda_time": 1.402356232550808, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.168, "cuda_time_us": 66.879, "pct_cuda_time": 0.19483635729921428, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 66.879, "pct_cuda_time": 0.19483635729921428, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.522, "cuda_time_us": 236.156, "pct_cuda_time": 0.6879853884530757, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.832, "pct_cuda_time": 0.0024238378156513445, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 235.324, "pct_cuda_time": 0.6855615506374244, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2372.44, "cuda_time_us": 1055.859, "pct_cuda_time": 3.0759987646584293, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.164, "cuda_time_us": 14.496, "pct_cuda_time": 0.042230712711156124, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.496, "pct_cuda_time": 0.042230712711156124, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], 
bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1667.833, "cuda_time_us": 240.382, "pct_cuda_time": 0.7002968531272855, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 146.932, "cuda_time_us": 107.359, "pct_cuda_time": 0.31276538948378935, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.591, "pct_cuda_time": 0.3105280007308804, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 474.974, "cuda_time_us": 19.263, "pct_cuda_time": 0.0561182546188604, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.263, "pct_cuda_time": 0.0561182546188604, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 711.572, "cuda_time_us": 30.176000000000002, "pct_cuda_time": 0.08791073308304685, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.616, "pct_cuda_time": 0.022187438466346923, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.28, 
"pct_cuda_time": 0.061994313361851704, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.003728981254848223, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 193.141, "cuda_time_us": 83.584, "pct_cuda_time": 0.24350247594158897, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.848, "pct_cuda_time": 0.24135831172005123, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.154, "cuda_time_us": 15.648, "pct_cuda_time": 0.04558679584051952, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.648, "pct_cuda_time": 0.04558679584051952, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 478.874, "cuda_time_us": 785.333, "pct_cuda_time": 2.287884402979468, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 158.442, "cuda_time_us": 482.937, "pct_cuda_time": 1.4069242345879969, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 482.201, "pct_cuda_time": 1.4047800703664592, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 103.301, "cuda_time_us": 67.199, "pct_cuda_time": 0.19576860261292633, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.199, "pct_cuda_time": 0.19576860261292633, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 165.259, "cuda_time_us": 235.197, "pct_cuda_time": 0.6851915657785449, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 234.397, "pct_cuda_time": 0.6828609524942647, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2434.86, "cuda_time_us": 1050.194, "pct_cuda_time": 3.0594951093391205, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.706, "cuda_time_us": 14.432, "pct_cuda_time": 0.04204426364841371, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.432, "pct_cuda_time": 0.04204426364841371, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1768.644, "cuda_time_us": 235.86900000000003, "pct_cuda_time": 0.6871492809373403, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 161.498, "cuda_time_us": 106.494, "pct_cuda_time": 0.3102454138701614, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 105.726, "pct_cuda_time": 0.3080080251172525, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 
4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 541.413, "cuda_time_us": 19.328, "pct_cuda_time": 0.05630761694820816, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.328, "pct_cuda_time": 0.05630761694820816, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 725.691, "cuda_time_us": 29.984, "pct_cuda_time": 0.08735138589481962, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.584, "pct_cuda_time": 0.02209421393497572, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.088, "pct_cuda_time": 0.06143496617362447, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.0038222057862194285, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 
0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 193.582, "cuda_time_us": 80.063, "pct_cuda_time": 0.233244864224151, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 79.327, "pct_cuda_time": 0.23110070000261326, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.434, "cuda_time_us": 15.072, "pct_cuda_time": 0.04390875427583782, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.072, "pct_cuda_time": 0.04390875427583782, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 448.191, "cuda_time_us": 784.8209999999999, "pct_cuda_time": 2.286392810477529, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 154.07, "cuda_time_us": 482.137, "pct_cuda_time": 1.404593621303717, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 481.401, "pct_cuda_time": 1.402449457082179, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.039, "cuda_time_us": 67.071, "pct_cuda_time": 0.1953957044874415, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.071, "pct_cuda_time": 0.1953957044874415, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.936, "cuda_time_us": 235.613, "pct_cuda_time": 0.6864034846863706, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 
0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 234.813, "pct_cuda_time": 0.6840728714020904, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2481.461, "cuda_time_us": 1058.1309999999999, "pct_cuda_time": 3.0826177063857845, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.079, "cuda_time_us": 15.04, "pct_cuda_time": 0.04381552974446661, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.04, "pct_cuda_time": 0.04381552974446661, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1714.989, "cuda_time_us": 241.277, "pct_cuda_time": 0.7029042267390739, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 148.021, "cuda_time_us": 107.006, "pct_cuda_time": 0.3117370063721007, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.238, "pct_cuda_time": 0.30949961761919176, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 492.05, "cuda_time_us": 19.392, "pct_cuda_time": 0.05649406601095057, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.392, "pct_cuda_time": 0.05649406601095057, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 734.698, "cuda_time_us": 30.112000000000002, "pct_cuda_time": 0.08772428402030445, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.552, "pct_cuda_time": 0.022000989403604513, "trace": 
"_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.088, "pct_cuda_time": 0.06143496617362447, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.004288328443075456, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 199.549, "cuda_time_us": 84.76700000000001, "pct_cuda_time": 0.24694887033571822, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 84.031, "pct_cuda_time": 0.24480470611418048, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" 
}, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 99.798, "cuda_time_us": 15.104, "pct_cuda_time": 0.044001978807209026, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.104, "pct_cuda_time": 0.044001978807209026, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 521.111, "cuda_time_us": 786.7099999999999, "pct_cuda_time": 2.2918959710950353, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.372, "cuda_time_us": 482.87399999999997, "pct_cuda_time": 1.4067406987918598, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 482.138, "pct_cuda_time": 1.4045965345703222, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 132.36, "cuda_time_us": 66.815, "pct_cuda_time": 0.19464990823647188, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 66.815, "pct_cuda_time": 0.19464990823647188, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 173.841, "cuda_time_us": 237.021, "pct_cuda_time": 0.6905053640667036, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.832, "pct_cuda_time": 0.0024238378156513445, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 236.189, "pct_cuda_time": 0.6880815262510522, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2431.228, "cuda_time_us": 1057.011, "pct_cuda_time": 3.0793548477877923, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.082, "cuda_time_us": 14.72, "pct_cuda_time": 0.042883284430754565, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type 
vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.72, "pct_cuda_time": 0.042883284430754565, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1739.264, "cuda_time_us": 239.54899999999998, "pct_cuda_time": 0.6978701020450288, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 153.234, "cuda_time_us": 106.367, "pct_cuda_time": 0.30987542901128196, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 105.599, "pct_cuda_time": 0.30763804025837305, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 526.556, "cuda_time_us": 19.327, "pct_cuda_time": 0.05630470368160282, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.327, "pct_cuda_time": 0.05630470368160282, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 728.092, "cuda_time_us": 30.495, "pct_cuda_time": 0.08884006513015356, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.679, "pct_cuda_time": 0.022370974262483988, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, 
cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.376, "pct_cuda_time": 0.062273986955965324, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.004195103911704251, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 182.099, "cuda_time_us": 83.36, "pct_cuda_time": 0.2428499042219905, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 1.952, "pct_cuda_time": 0.005686696413643539, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.408, "pct_cuda_time": 0.23716320780834696, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.504, "cuda_time_us": 15.808, "pct_cuda_time": 0.04605291849737555, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.808, "pct_cuda_time": 0.04605291849737555, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 460.967, "cuda_time_us": 786.934, "pct_cuda_time": 2.2925485428146337, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 168.064, "cuda_time_us": 482.938, "pct_cuda_time": 1.4069271478546024, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], 
bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 482.202, "pct_cuda_time": 1.4047829836330648, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.095, "cuda_time_us": 67.327, "pct_cuda_time": 0.1961415007384112, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.327, "pct_cuda_time": 0.1961415007384112, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 147.79, "cuda_time_us": 236.669, "pct_cuda_time": 0.6894798942216204, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 235.869, "pct_cuda_time": 0.6871492809373402, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2730.716, "cuda_time_us": 1055.156, "pct_cuda_time": 3.073950738234868, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.278, "cuda_time_us": 14.528, "pct_cuda_time": 0.04232393724252733, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.528, "pct_cuda_time": 0.04232393724252733, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 2049.143, "cuda_time_us": 239.325, "pct_cuda_time": 0.6972175303254303, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 149.311, "cuda_time_us": 107.262, "pct_cuda_time": 0.3124828026230703, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.494, 
"pct_cuda_time": 0.3102454138701614, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 486.657, "cuda_time_us": 19.04, "pct_cuda_time": 0.055468596165867315, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.04, "pct_cuda_time": 0.055468596165867315, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1078.095, "cuda_time_us": 29.761, "pct_cuda_time": 0.08670172744182653, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.584, "pct_cuda_time": 0.02209421393497572, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 20.897, "pct_cuda_time": 0.06087853225200258, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.003728981254848223, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], 
bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 199.036, "cuda_time_us": 83.262, "pct_cuda_time": 0.2425644040946662, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.002141250954932378, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.527, "pct_cuda_time": 0.2404231531397338, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.047, "cuda_time_us": 15.872, "pct_cuda_time": 0.04623936756011796, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.872, "pct_cuda_time": 0.04623936756011796, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 454.001, "cuda_time_us": 785.431, "pct_cuda_time": 2.288169903106793, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 152.174, "cuda_time_us": 482.363, "pct_cuda_time": 1.405252019556526, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0021470774881430783, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 481.626, "pct_cuda_time": 1.4031049420683828, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.628, "cuda_time_us": 66.655, "pct_cuda_time": 0.19418378557961585, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 66.655, "pct_cuda_time": 0.19418378557961585, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 151.764, "cuda_time_us": 236.413, "pct_cuda_time": 
0.6887340979706508, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 235.613, "pct_cuda_time": 0.6864034846863706, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2486.154, "cuda_time_us": 1051.8890000000001, "pct_cuda_time": 3.06443309623519, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 65.857, "cuda_time_us": 14.272, "pct_cuda_time": 0.04157814099155768, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.272, "pct_cuda_time": 0.04157814099155768, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1776.623, "cuda_time_us": 237.72500000000002, "pct_cuda_time": 0.6925563037568702, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 145.528, "cuda_time_us": 106.81400000000001, "pct_cuda_time": 0.3111776591838735, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.046, "pct_cuda_time": 0.30894027043096456, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 522.524, "cuda_time_us": 19.712, "pct_cuda_time": 0.057426311324662624, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.712, "pct_cuda_time": 0.057426311324662624, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 740.673, "cuda_time_us": 30.208000000000002, "pct_cuda_time": 0.08800395761441807, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long 
const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.936, "pct_cuda_time": 0.02311968378005898, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 20.992, "pct_cuda_time": 0.061155292579510856, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.003728981254848223, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 217.37, "cuda_time_us": 80.991, "pct_cuda_time": 0.23594837563391596, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.255, "pct_cuda_time": 0.23380421141237817, "trace": "mm(bfloat16[1536, 
4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 97.909, "cuda_time_us": 15.359, "pct_cuda_time": 0.04474486179157332, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.359, "pct_cuda_time": 0.04474486179157332, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 473.543, "cuda_time_us": 784.5330000000001, "pct_cuda_time": 2.2855537896951885, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 164.325, "cuda_time_us": 482.39300000000003, "pct_cuda_time": 1.4053394175546865, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.002141250954932378, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 481.658, "pct_cuda_time": 1.4031981665997542, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.241, "cuda_time_us": 66.623, "pct_cuda_time": 0.19409056104824465, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 66.623, "pct_cuda_time": 0.19409056104824465, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 156.94, "cuda_time_us": 235.51700000000002, "pct_cuda_time": 0.6861238110922571, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 234.717, "pct_cuda_time": 0.6837931978079769, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2561.769, "cuda_time_us": 1055.7640000000001, "pct_cuda_time": 3.0757220043309217, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.052, "cuda_time_us": 14.721, "pct_cuda_time": 
0.042886197697359914, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.721, "pct_cuda_time": 0.042886197697359914, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1813.829, "cuda_time_us": 239.293, "pct_cuda_time": 0.6971243057940592, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 184.594, "cuda_time_us": 106.91, "pct_cuda_time": 0.3114573327779871, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.002141250954932378, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.175, "pct_cuda_time": 0.30931608182305476, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 556.225, "cuda_time_us": 19.552, "pct_cuda_time": 0.056960188667806604, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.552, "pct_cuda_time": 0.056960188667806604, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 733.292, "cuda_time_us": 30.336000000000002, "pct_cuda_time": 0.0883768557399029, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.584, "pct_cuda_time": 0.02209421393497572, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, 
cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.248, "pct_cuda_time": 0.0619010888304805, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.004381552974446662, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 192.525, "cuda_time_us": 82.495, "pct_cuda_time": 0.2403299286083626, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.759, "pct_cuda_time": 0.2381857643868249, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.512, "cuda_time_us": 15.808, "pct_cuda_time": 0.04605291849737555, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.808, "pct_cuda_time": 0.04605291849737555, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 458.643, "cuda_time_us": 785.942, "pct_cuda_time": 2.2896585823421267, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 154.628, "cuda_time_us": 482.618, "pct_cuda_time": 1.4059949025408902, "trace": "" }, "children": [ { "entry": 
{ "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 481.882, "pct_cuda_time": 1.4038507383193526, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.893, "cuda_time_us": 67.36, "pct_cuda_time": 0.1962376385363877, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.36, "pct_cuda_time": 0.1962376385363877, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 157.532, "cuda_time_us": 235.964, "pct_cuda_time": 0.6874260412648485, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 235.164, "pct_cuda_time": 0.6850954279805683, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2402.776, "cuda_time_us": 1056.8500000000001, "pct_cuda_time": 3.078885811864332, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.892, "cuda_time_us": 14.304, "pct_cuda_time": 0.04167136552292889, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.304, "pct_cuda_time": 0.04167136552292889, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1705.411, "cuda_time_us": 240.82799999999997, "pct_cuda_time": 0.7015961700332717, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.972, "cuda_time_us": 107.423, "pct_cuda_time": 0.31295183854653175, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.655, "pct_cuda_time": 0.31071444979362284, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 472.724, "cuda_time_us": 19.584, "pct_cuda_time": 0.05705341319917781, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.584, "pct_cuda_time": 0.05705341319917781, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 738.627, "cuda_time_us": 30.111, "pct_cuda_time": 0.08772137075369908, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.679, "pct_cuda_time": 0.022370974262483988, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 20.928, "pct_cuda_time": 0.06096884351676845, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 
0.004381552974446662, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 201.701, "cuda_time_us": 83.71, "pct_cuda_time": 0.24386954753386303, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.704, "pct_cuda_time": 0.0020509396901665223, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.006, "pct_cuda_time": 0.24181860784369655, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.079, "cuda_time_us": 15.04, "pct_cuda_time": 0.04381552974446661, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.04, "pct_cuda_time": 0.04381552974446661, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 468.839, "cuda_time_us": 786.6780000000001, "pct_cuda_time": 2.2918027465636643, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.46, "cuda_time_us": 483.706, "pct_cuda_time": 1.4091645366075112, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 482.97, "pct_cuda_time": 1.4070203723859736, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.359, "cuda_time_us": 67.167, "pct_cuda_time": 0.19567537808155513, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.167, "pct_cuda_time": 0.19567537808155513, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], 
bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.689, "cuda_time_us": 235.805, "pct_cuda_time": 0.6869628318745978, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 235.005, "pct_cuda_time": 0.6846322185903176, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2435.914, "cuda_time_us": 1057.745, "pct_cuda_time": 3.08149318547612, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.268, "cuda_time_us": 14.656, "pct_cuda_time": 0.04269683536801215, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.656, "pct_cuda_time": 0.04269683536801215, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1759.623, "cuda_time_us": 239.228, "pct_cuda_time": 0.6969349434647115, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 153.994, "cuda_time_us": 106.494, "pct_cuda_time": 0.3102454138701614, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 105.758, "pct_cuda_time": 0.3081012496486237, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 553.029, "cuda_time_us": 19.488, "pct_cuda_time": 0.0567737396050642, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.488, "pct_cuda_time": 0.0567737396050642, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 719.15, "cuda_time_us": 30.527, "pct_cuda_time": 0.08893328966152476, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.776, "pct_cuda_time": 0.02265356112320295, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.471, "pct_cuda_time": 0.06255074728347358, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.003728981254848223, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 184.83, "cuda_time_us": 82.719, "pct_cuda_time": 0.240982500327961, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.951, "pct_cuda_time": 0.23874511157505207, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.312, "cuda_time_us": 15.327, "pct_cuda_time": 0.04465163726020212, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.327, "pct_cuda_time": 0.04465163726020212, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 446.995, "cuda_time_us": 788.5339999999999, "pct_cuda_time": 2.2972097693831937, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 153.965, "cuda_time_us": 484.506, "pct_cuda_time": 1.4114951498917914, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 483.738, "pct_cuda_time": 1.4092577611388826, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.832, "cuda_time_us": 67.103, "pct_cuda_time": 0.1954889290188127, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.103, "pct_cuda_time": 0.1954889290188127, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 148.763, "cuda_time_us": 236.925, "pct_cuda_time": 0.69022569047259, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 236.125, "pct_cuda_time": 0.6878950771883098, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2430.668, "cuda_time_us": 1052.212, 
"pct_cuda_time": 3.0653740813487174, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.15, "cuda_time_us": 14.176, "pct_cuda_time": 0.04129846739744407, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.176, "pct_cuda_time": 0.04129846739744407, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1762.33, "cuda_time_us": 236.159, "pct_cuda_time": 0.6879941282528917, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 147.711, "cuda_time_us": 107.35900000000001, "pct_cuda_time": 0.31276538948378935, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.623, "pct_cuda_time": 0.3106212252622516, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 542.132, "cuda_time_us": 19.136, "pct_cuda_time": 0.05574826975998093, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.136, "pct_cuda_time": 0.05574826975998093, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 741.948, "cuda_time_us": 29.729, "pct_cuda_time": 0.08660850291045533, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.52, "pct_cuda_time": 0.021907764872233306, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, 
flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 20.864, "pct_cuda_time": 0.06078239445402603, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.345, "pct_cuda_time": 0.0039183435841959845, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 188.535, "cuda_time_us": 79.935, "pct_cuda_time": 0.23287196609866617, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 79.199, "pct_cuda_time": 0.23072780187712844, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.511, "cuda_time_us": 15.008, "pct_cuda_time": 0.04372230521309541, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.008, "pct_cuda_time": 0.04372230521309541, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 450.849, "cuda_time_us": 786.8689999999999, "pct_cuda_time": 2.2923591804852856, "trace": "" }, "children": [ { "entry": 
{ "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.385, "cuda_time_us": 483.962, "pct_cuda_time": 1.4099103328584808, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 483.226, "pct_cuda_time": 1.4077661686369431, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.097, "cuda_time_us": 66.399, "pct_cuda_time": 0.1934379893286462, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 66.399, "pct_cuda_time": 0.1934379893286462, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 146.111, "cuda_time_us": 236.50799999999998, "pct_cuda_time": 0.6890108582981589, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.832, "pct_cuda_time": 0.0024238378156513445, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 235.676, "pct_cuda_time": 0.6865870204825075, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2425.899, "cuda_time_us": 1062.6419999999998, "pct_cuda_time": 3.095759452042519, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.738, "cuda_time_us": 14.464, "pct_cuda_time": 0.04213748817978492, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.464, "pct_cuda_time": 0.04213748817978492, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1723.62, "cuda_time_us": 241.53300000000002, "pct_cuda_time": 0.7036500229900436, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 155.541, "cuda_time_us": 107.934, "pct_cuda_time": 0.31444051778186566, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": 
"mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 107.198, "pct_cuda_time": 0.3122963535603279, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 510.498, "cuda_time_us": 19.84, "pct_cuda_time": 0.05779920945014745, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.84, "pct_cuda_time": 0.05779920945014745, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 728.468, "cuda_time_us": 30.656000000000002, "pct_cuda_time": 0.08930910105361493, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.808, "pct_cuda_time": 0.022746785654574157, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.376, "pct_cuda_time": 0.062273986955965324, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { 
"name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.004288328443075456, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 191.963, "cuda_time_us": 83.10300000000001, "pct_cuda_time": 0.24210119470441552, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 82.367, "pct_cuda_time": 0.23995703048287778, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.148, "cuda_time_us": 15.744, "pct_cuda_time": 0.045866469434633135, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.744, "pct_cuda_time": 0.045866469434633135, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 482.048, "cuda_time_us": 790.901, "pct_cuda_time": 2.3041054714380578, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 160.437, "cuda_time_us": 485.68899999999996, "pct_cuda_time": 1.4149415442859206, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 484.953, "pct_cuda_time": 1.412797380064383, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.598, "cuda_time_us": 67.519, "pct_cuda_time": 0.19670084792663842, "trace": "" }, "children": [ { "entry": { "name": "void 
vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.519, "pct_cuda_time": 0.19670084792663842, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 151.672, "cuda_time_us": 237.69299999999998, "pct_cuda_time": 0.6924630792254989, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.832, "pct_cuda_time": 0.0024238378156513445, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 236.861, "pct_cuda_time": 0.6900392414098475, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2641.146, "cuda_time_us": 1060.049, "pct_cuda_time": 3.0882053517348464, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.083, "cuda_time_us": 14.208, "pct_cuda_time": 0.04139169192881527, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.208, "pct_cuda_time": 0.04139169192881527, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1947.477, "cuda_time_us": 241.98000000000002, "pct_cuda_time": 0.7049522531626351, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 150.148, "cuda_time_us": 107.678, "pct_cuda_time": 0.31369472153089606, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.942, "pct_cuda_time": 0.31155055730935827, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 537.84, "cuda_time_us": 20.064, "pct_cuda_time": 0.05845178116974589, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.064, "pct_cuda_time": 0.05845178116974589, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 
128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 917.571, "cuda_time_us": 30.271000000000004, "pct_cuda_time": 0.08818749341055514, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.551, "pct_cuda_time": 0.021998076136999164, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.28, "pct_cuda_time": 0.061994313361851704, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.004195103911704251, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 196.885, "cuda_time_us": 83.967, "pct_cuda_time": 0.24461825705143803, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], 
bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.199, "pct_cuda_time": 0.24238086829852912, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 78.414, "cuda_time_us": 15.296, "pct_cuda_time": 0.04456132599543626, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.296, "pct_cuda_time": 0.04456132599543626, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 471.888, "cuda_time_us": 788.565, "pct_cuda_time": 2.29730008064796, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 178.056, "cuda_time_us": 485.433, "pct_cuda_time": 1.414195748034951, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 484.697, "pct_cuda_time": 1.4120515838134133, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.563, "cuda_time_us": 67.615, "pct_cuda_time": 0.19698052152075202, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.615, "pct_cuda_time": 0.19698052152075202, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 147.044, "cuda_time_us": 235.51700000000002, "pct_cuda_time": 0.6861238110922571, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 234.717, "pct_cuda_time": 0.6837931978079769, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- 
linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2590.338, "cuda_time_us": 1058.417, "pct_cuda_time": 3.083450900634915, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 70.309, "cuda_time_us": 14.4, "pct_cuda_time": 0.0419510391170425, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.4, "pct_cuda_time": 0.0419510391170425, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1885.034, "cuda_time_us": 239.356, "pct_cuda_time": 0.6973078415901962, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 174.419, "cuda_time_us": 107.83800000000001, "pct_cuda_time": 0.3141608441877521, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 107.102, "pct_cuda_time": 0.31201667996621435, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 588.979, "cuda_time_us": 19.52, "pct_cuda_time": 0.0568669641364354, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.52, "pct_cuda_time": 0.0568669641364354, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 757.117, "cuda_time_us": 30.016000000000002, "pct_cuda_time": 0.08744461042619084, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.552, "pct_cuda_time": 0.022000989403604513, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, 
false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.184, "pct_cuda_time": 0.06171463976773809, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.003728981254848223, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 212.681, "cuda_time_us": 81.982, "pct_cuda_time": 0.23883542283981798, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.246, "pct_cuda_time": 0.2366912586182802, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.278, "cuda_time_us": 15.199, "pct_cuda_time": 0.04427873913471729, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.199, "pct_cuda_time": 0.04427873913471729, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] 
}, { "entry": { "name": "LlamaMLP", "cpu_time_us": 471.275, "cuda_time_us": 789.462, "pct_cuda_time": 2.2999132807929588, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 163.363, "cuda_time_us": 485.498, "pct_cuda_time": 1.4143851103642988, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 484.762, "pct_cuda_time": 1.412240946142761, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 99.697, "cuda_time_us": 67.583, "pct_cuda_time": 0.19688729698938082, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.583, "pct_cuda_time": 0.19688729698938082, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 155.464, "cuda_time_us": 236.381, "pct_cuda_time": 0.6886408734392795, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 235.581, "pct_cuda_time": 0.6863102601549993, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2441.869, "cuda_time_us": 1059.6370000000002, "pct_cuda_time": 3.0870050858934426, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 91.692, "cuda_time_us": 14.464, "pct_cuda_time": 0.04213748817978492, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.464, "pct_cuda_time": 0.04213748817978492, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1705.74, "cuda_time_us": 238.334, "pct_cuda_time": 0.6943304831195284, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 154.588, "cuda_time_us": 107.104, "pct_cuda_time": 0.31202250649942503, "trace": "" }, 
"children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.769, "pct_cuda_time": 0.002240302019514284, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.335, "pct_cuda_time": 0.3097822044799107, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 501.152, "cuda_time_us": 19.296, "pct_cuda_time": 0.05621439241683695, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.296, "pct_cuda_time": 0.05621439241683695, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 720.461, "cuda_time_us": 29.887, "pct_cuda_time": 0.08706879903410066, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.584, "pct_cuda_time": 0.02209421393497572, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 20.992, "pct_cuda_time": 0.061155292579510856, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.311, "pct_cuda_time": 0.003819292519614078, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 184.895, "cuda_time_us": 82.047, "pct_cuda_time": 0.23902478516916573, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.279, "pct_cuda_time": 0.2367873964162568, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.254, "cuda_time_us": 15.648, "pct_cuda_time": 0.04558679584051952, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.648, "pct_cuda_time": 0.04558679584051952, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 487.085, "cuda_time_us": 791.191, "pct_cuda_time": 2.3049503187536096, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 154.154, "cuda_time_us": 486.586, "pct_cuda_time": 1.4175547444309198, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 485.85, "pct_cuda_time": 1.415410580209382, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": 
"SiluAndMul", "cpu_time_us": 126.205, "cuda_time_us": 66.527, "pct_cuda_time": 0.19381088745413103, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 66.527, "pct_cuda_time": 0.19381088745413103, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 154.892, "cuda_time_us": 238.078, "pct_cuda_time": 0.6935846868685588, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 237.278, "pct_cuda_time": 0.6912540735842786, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2510.153, "cuda_time_us": 1056.9450000000002, "pct_cuda_time": 3.0791625721918403, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 104.807, "cuda_time_us": 14.688, "pct_cuda_time": 0.04279005989938336, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.688, "pct_cuda_time": 0.04279005989938336, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1788.386, "cuda_time_us": 237.596, "pct_cuda_time": 0.69218049236478, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.074, "cuda_time_us": 107.743, "pct_cuda_time": 0.3138840838602438, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.975, "pct_cuda_time": 0.31164669510733484, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 487.299, "cuda_time_us": 19.232, "pct_cuda_time": 0.05602794335409454, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.232, 
"pct_cuda_time": 0.05602794335409454, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 799.753, "cuda_time_us": 30.495, "pct_cuda_time": 0.08884006513015356, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.584, "pct_cuda_time": 0.02209421393497572, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.407, "pct_cuda_time": 0.06236429822073118, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.004381552974446662, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 201.197, "cuda_time_us": 80.126, "pct_cuda_time": 0.23342840002028806, "trace": "" }, "children": [ { "entry": { "name": 
"Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.735, "pct_cuda_time": 0.002141250954932378, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 79.391, "pct_cuda_time": 0.23128714906535566, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.631, "cuda_time_us": 15.615, "pct_cuda_time": 0.045490658042542965, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.615, "pct_cuda_time": 0.045490658042542965, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 460.778, "cuda_time_us": 789.046, "pct_cuda_time": 2.2987013618851337, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 157.745, "cuda_time_us": 484.89, "pct_cuda_time": 1.4126138442682459, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 484.154, "pct_cuda_time": 1.4104696800467083, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.285, "cuda_time_us": 67.167, "pct_cuda_time": 0.19567537808155513, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.167, "pct_cuda_time": 0.19567537808155513, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 150.852, "cuda_time_us": 236.989, "pct_cuda_time": 0.6904121395353324, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 236.189, "pct_cuda_time": 0.6880815262510522, 
"trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2468.466, "cuda_time_us": 1059.795, "pct_cuda_time": 3.0874653820170876, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.657, "cuda_time_us": 14.367, "pct_cuda_time": 0.041854901319065954, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.367, "pct_cuda_time": 0.041854901319065954, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1754.843, "cuda_time_us": 241.279, "pct_cuda_time": 0.7029100532722846, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 152.17, "cuda_time_us": 107.711, "pct_cuda_time": 0.3137908593288726, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.975, "pct_cuda_time": 0.31164669510733484, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 530.745, "cuda_time_us": 19.424, "pct_cuda_time": 0.056587290542321776, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 19.424, "pct_cuda_time": 0.056587290542321776, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 743.739, "cuda_time_us": 30.272000000000002, "pct_cuda_time": 0.08819040667716047, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.648, "pct_cuda_time": 0.02228066299771813, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> 
>, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.12, "pct_cuda_time": 0.06152819070499568, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.004381552974446662, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 186.325, "cuda_time_us": 83.872, "pct_cuda_time": 0.2443414967239298, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0021470774881430783, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 83.135, "pct_cuda_time": 0.24219441923578672, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.526, "cuda_time_us": 15.616, "pct_cuda_time": 0.045493571309148315, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.616, "pct_cuda_time": 0.045493571309148315, 
"trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 482.912, "cuda_time_us": 788.533, "pct_cuda_time": 2.297206856116589, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 155.413, "cuda_time_us": 484.85699999999997, "pct_cuda_time": 1.4125177064702692, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 484.121, "pct_cuda_time": 1.4103735422487316, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.171, "cuda_time_us": 67.104, "pct_cuda_time": 0.19549184228541808, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.104, "pct_cuda_time": 0.19549184228541808, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 152.837, "cuda_time_us": 236.572, "pct_cuda_time": 0.6891973073609013, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.864, "pct_cuda_time": 0.0025170623470225505, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 235.708, "pct_cuda_time": 0.6866802450138788, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2329.091, "cuda_time_us": 1056.469, "pct_cuda_time": 3.077775857287693, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.111, "cuda_time_us": 14.464, "pct_cuda_time": 0.04213748817978492, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.464, "pct_cuda_time": 0.04213748817978492, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1659.008, "cuda_time_us": 237.47, "pct_cuda_time": 0.6918134207725057, "trace": "" }, "children": [ { "entry": { "name": 
"QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 146.692, "cuda_time_us": 107.551, "pct_cuda_time": 0.31332473667201655, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 106.783, "pct_cuda_time": 0.31108734791910764, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[1536, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 495.444, "cuda_time_us": 20.128, "pct_cuda_time": 0.0586382302324883, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 20.128, "pct_cuda_time": 0.0586382302324883, "trace": "_C::rotary_embedding(int64[1536], bfloat16[1536, 4096], bfloat16[1536, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 697.2, "cuda_time_us": 30.047, "pct_cuda_time": 0.08753492169095668, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 7.744, "pct_cuda_time": 0.022560336591831747, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[1536], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, false, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 21.023, "pct_cuda_time": 0.0612456038442767, "trace": "_vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, 
None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.003728981254848223, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], None, None, bfloat16[1536, 32, 128], int32[13], int32[13], None, None, None, 128, 128, None, None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[1536, 32, 128], bfloat16[1536, 8, 128], bfloat16[1536, 8, 128], bfloat16[1536, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 178.035, "cuda_time_us": 79.744, "pct_cuda_time": 0.23231553217704426, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0021470774881430783, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 79.007, "pct_cuda_time": 0.2301684546889012, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[1536, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.318, "cuda_time_us": 15.648, "pct_cuda_time": 0.04558679584051952, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 15.648, "pct_cuda_time": 0.04558679584051952, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 439.726, "cuda_time_us": 788.8870000000001, "pct_cuda_time": 2.298238152494883, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 152.636, "cuda_time_us": 484.63500000000005, "pct_cuda_time": 1.4118709612838818, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.0021470774881430783, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 483.898, "pct_cuda_time": 1.4097238837957387, "trace": "mm(bfloat16[1536, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[1536, 4096], bfloat16[4096, 
28672]) <- linear(bfloat16[1536, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 92.039, "cuda_time_us": 67.391, "pct_cuda_time": 0.19632794980115356, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 67.391, "pct_cuda_time": 0.19632794980115356, "trace": "_C::silu_and_mul(bfloat16[1536, 14336], bfloat16[1536, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 148.214, "cuda_time_us": 236.86100000000002, "pct_cuda_time": 0.6900392414098476, "trace": "" }, "children": [ { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_on_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 236.061, "pct_cuda_time": 0.6877086281255674, "trace": "mm(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[1536, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[1536, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.578, "cuda_time_us": 14.559, "pct_cuda_time": 0.04241424850729318, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 14.559, "pct_cuda_time": 0.04241424850729318, "trace": "_C::fused_add_rms_norm(bfloat16[1536, 4096], bfloat16[1536, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 522.073, "cuda_time_us": 363.9, "pct_cuda_time": 1.0601377176869282, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 7.233, "pct_cuda_time": 0.021071657356497808, "trace": "index_select(bfloat16[1536, 4096], 0, int64[12])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[12, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 355.931, "pct_cuda_time": 1.0369218961088926, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[12, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 3939.941, "cuda_time_us": 127.26100000000001, "pct_cuda_time": 0.37074522146346856, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 
0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "copy_(bfloat16[12], bfloat16[12], True) <- _to_copy(bfloat16[12], 15, 0, None, None, True, None) <- to(bfloat16[12], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.002144164221537728, "trace": "copy_(bfloat16[12], bfloat16[12], True) <- _to_copy(bfloat16[12], 15, 0, None, None, True, None) <- to(bfloat16[12], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "copy_(int32[12], int32[12], True) <- _to_copy(int32[12], 3, 0, None, None, True, None) <- to(int32[12], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "copy_(bfloat16[12], bfloat16[12], True) <- _to_copy(bfloat16[12], 15, 0, None, None, True, None) <- to(bfloat16[12], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "copy_(bfloat16[12], bfloat16[12], True) <- _to_copy(bfloat16[12], 15, 0, None, None, True, None) <- to(bfloat16[12], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.768, "pct_cuda_time": 0.0022373887529089334, "trace": "copy_(bfloat16[12], bfloat16[12], True) <- _to_copy(bfloat16[12], 15, 0, None, None, True, None) <- to(bfloat16[12], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.0023306132842801394, "trace": "copy_(bfloat16[12], bfloat16[12], True) <- _to_copy(bfloat16[12], 15, 0, None, None, True, None) <- to(bfloat16[12], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 7.008, "pct_cuda_time": 0.02041617237029402, "trace": "copy_(float32[12, 128256], bfloat16[12, 128256], False) <- _to_copy(bfloat16[12, 128256], 6, None, None, None, False, None) <- to(bfloat16[12, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 8.544, "pct_cuda_time": 0.02489094987611189, "trace": "div_(float32[12, 128256], bfloat16[12, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, 
float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 35.743, "pct_cuda_time": 0.10412888827503128, "trace": "_softmax(float32[12, 128256], -1, False) <- softmax(float32[12, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 28.831, "pct_cuda_time": 0.08399238949885086, "trace": "_log_softmax(float32[12, 128256], -1, False) <- log_softmax(float32[12, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 2.048, "pct_cuda_time": 0.005966370007757156, "trace": "copy_(int64[12], int32[12], False) <- _to_copy(int32[12], 4, None, None, None, False, None) <- to(int32[12], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 9.184, "pct_cuda_time": 0.026755440503535993, "trace": "index(float32[12, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 27.744, "pct_cuda_time": 0.08082566869883523, "trace": "argmax(float32[12, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 2.783, "pct_cuda_time": 0.008107620962689535, "trace": "copy_(int64[12], int64[12], False) <- _to_copy(int64[12], 4, 0, None, None, False, None) <- to(int64[12], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] }, "decode_1": { "metadata": { "num_running_seqs": 12 }, "summary_stats": [ { "entry": { "name": "LlamaForCausalLM", "cuda_time_us": 6306.274000000001, "pct_cuda_time": 92.97781161620733, "invocations": 1 }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cuda_time_us": 9.376, "pct_cuda_time": 0.13823693066834075, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, 
at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 9.376, "pct_cuda_time": 0.13823693066834075, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cuda_time_us": 6293.825000000001, "pct_cuda_time": 92.79426729561324, "invocations": 32 }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 204.31100000000004, "pct_cuda_time": 3.012300079114694, "invocations": 64 }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 4.385, "pct_cuda_time": 0.06465112425135176, "invocations": 1 }, "children": [] }, { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 199.92600000000002, "pct_cuda_time": 2.9476489548633418, "invocations": 63 }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cuda_time_us": 1790.2779999999998, "pct_cuda_time": 26.395321647083588, "invocations": 32 }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cuda_time_us": 684.5010000000001, "pct_cuda_time": 10.09207735488587, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 684.5010000000001, "pct_cuda_time": 10.09207735488587, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cuda_time_us": 117.856, "pct_cuda_time": 1.7376335005170613, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cuda_time_us": 117.856, "pct_cuda_time": 1.7376335005170613, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "Attention", "cuda_time_us": 483.0649999999999, "pct_cuda_time": 7.122165413108148, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cuda_time_us": 79.04, "pct_cuda_time": 1.1653420435180943, "invocations": 32 }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cuda_time_us": 359.322, "pct_cuda_time": 5.2977357510249075, "invocations": 
32 }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cuda_time_us": 44.703, "pct_cuda_time": 0.659087618565149, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cuda_time_us": 504.856, "pct_cuda_time": 7.443445378572508, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 504.856, "pct_cuda_time": 7.443445378572508, "invocations": 32 }, "children": [] } ] } ] }, { "entry": { "name": "LlamaMLP", "cuda_time_us": 4299.236000000001, "pct_cuda_time": 63.386645569414966, "invocations": 32 }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cuda_time_us": 2604.343999999999, "pct_cuda_time": 38.397666485122556, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 2604.343999999999, "pct_cuda_time": 38.397666485122556, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cuda_time_us": 290.17300000000006, "pct_cuda_time": 4.278223643645952, "invocations": 32 }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cuda_time_us": 290.17300000000006, "pct_cuda_time": 4.278223643645952, "invocations": 32 }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cuda_time_us": 1404.7190000000003, "pct_cuda_time": 20.710755440646437, "invocations": 32 }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 1404.7190000000003, "pct_cuda_time": 20.710755440646437, "invocations": 32 }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cuda_time_us": 3.073, "pct_cuda_time": 0.04530738992574777, "invocations": 1 }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cuda_time_us": 3.073, "pct_cuda_time": 0.04530738992574777, "invocations": 1 }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cuda_time_us": 351.38800000000003, "pct_cuda_time": 5.1807592356748, "invocations": 1 }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cuda_time_us": 7.231, "pct_cuda_time": 0.1066116942899714, "invocations": 1 }, "children": [] }, { "entry": { "name": "Memset (Device)", "cuda_time_us": 0.737, "pct_cuda_time": 0.010866106858208952, "invocations": 1 }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cuda_time_us": 343.42, "pct_cuda_time": 5.063281434526619, "invocations": 1 }, "children": [] } ] }, { "entry": { "name": "Sampler", "cuda_time_us": 124.896, "pct_cuda_time": 1.841429148117863, "invocations": 1 }, "children": [ { "entry": { 
"name": "Memcpy HtoD (Pinned -> Device)", "cuda_time_us": 5.441, "pct_cuda_time": 0.08022047139147205, "invocations": 7 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 6.784, "pct_cuda_time": 0.10002126041531822, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cuda_time_us": 8.928, "pct_cuda_time": 0.13163175309374428, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 34.816, "pct_cuda_time": 0.5133166572257841, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cuda_time_us": 28.192, "pct_cuda_time": 0.41565438880139316, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cuda_time_us": 1.824, "pct_cuda_time": 0.026892508696571407, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cuda_time_us": 9.056, "pct_cuda_time": 0.13351894668648612, "invocations": 1 }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cuda_time_us": 27.295, "pct_cuda_time": 0.4024292899522569, "invocations": 1 }, "children": [] }, { "entry": 
{ "name": "Memcpy DtoH (Device -> Pageable)", "cuda_time_us": 2.56, "pct_cuda_time": 0.03774387185483707, "invocations": 1 }, "children": [] } ] } ], "model_stats": [ { "entry": { "name": "LlamaForCausalLM", "cpu_time_us": 79570.592, "cuda_time_us": 6306.274000000001, "pct_cuda_time": 92.97781161620733, "trace": "" }, "children": [ { "entry": { "name": "VocabParallelEmbedding(weight=bfloat16[128256, 4096])", "cpu_time_us": 299.024, "cuda_time_us": 9.376, "pct_cuda_time": 0.13823693066834075, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 9.376, "pct_cuda_time": 0.13823693066834075, "trace": "index_select(bfloat16[128256, 4096], 0, int64[12]) <- embedding(bfloat16[128256, 4096], int64[12], -1, False, False)" }, "children": [] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 4470.946, "cuda_time_us": 204.766, "pct_cuda_time": 3.019008462588893, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 271.977, "cuda_time_us": 4.385, "pct_cuda_time": 0.06465112425135176, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rms_norm_kernel(c10::BFloat16*, c10::BFloat16 const*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.385, "pct_cuda_time": 0.06465112425135176, "trace": "_C::rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 3306.463, "cuda_time_us": 62.494, "pct_cuda_time": 0.9213927842563233, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 668.393, "cuda_time_us": 26.719, "pct_cuda_time": 0.39393691878491854, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 26.719, "pct_cuda_time": 0.39393691878491854, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 1030.463, "cuda_time_us": 3.712, "pct_cuda_time": 0.05472861418951375, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05472861418951375, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 1050.218, "cuda_time_us": 15.392, "pct_cuda_time": 0.22693502952720784, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.304, "pct_cuda_time": 0.03396948466935336, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], 
int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.808, "pct_cuda_time": 0.17409360893043596, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018871935927418534, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 286.509, "cuda_time_us": 16.671, "pct_cuda_time": 0.24579222175468307, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.671, "pct_cuda_time": 0.24579222175468307, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 124.254, "cuda_time_us": 3.36, "pct_cuda_time": 0.049538831809473646, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.36, "pct_cuda_time": 0.049538831809473646, 
"trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 613.318, "cuda_time_us": 134.527, "pct_cuda_time": 1.9834257222717442, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 209.794, "cuda_time_us": 81.567, "pct_cuda_time": 1.2025993732748026, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.567, "pct_cuda_time": 1.2025993732748026, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 148.003, "cuda_time_us": 9.184, "pct_cuda_time": 0.13540614027922795, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.184, "pct_cuda_time": 0.13540614027922795, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 186.459, "cuda_time_us": 43.776, "pct_cuda_time": 0.6454202087177139, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.776, "pct_cuda_time": 0.6454202087177139, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2496.878, "cuda_time_us": 197.564, "pct_cuda_time": 2.912824335597277, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.771, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1794.175, "cuda_time_us": 56.863, "pct_cuda_time": 0.8383710098756251, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 144.507, "cuda_time_us": 21.887, "pct_cuda_time": 0.3226953606589136, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.887, "pct_cuda_time": 0.3226953606589136, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 540.679, "cuda_time_us": 3.585, "pct_cuda_time": 0.052856164296715184, "trace": "" 
}, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.585, "pct_cuda_time": 0.052856164296715184, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 758.747, "cuda_time_us": 15.711999999999998, "pct_cuda_time": 0.23165301350906248, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.88, "pct_cuda_time": 0.042461855836691695, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.328, "pct_cuda_time": 0.16701663295765398, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.022174524714716776, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, 
"children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 189.483, "cuda_time_us": 15.679, "pct_cuda_time": 0.23116647141093374, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.679, "pct_cuda_time": 0.23116647141093374, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.247, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 448.118, "cuda_time_us": 134.333, "pct_cuda_time": 1.9805654444827452, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 143.931, "cuda_time_us": 80.766, "pct_cuda_time": 1.190789669620223, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.766, "pct_cuda_time": 1.190789669620223, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 104.169, "cuda_time_us": 8.96, "pct_cuda_time": 0.13210355149192973, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.96, "pct_cuda_time": 0.13210355149192973, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 140.41, "cuda_time_us": 44.607, "pct_cuda_time": 0.6576722233705925, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.607, "pct_cuda_time": 0.6576722233705925, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2434.801, "cuda_time_us": 196.605, "pct_cuda_time": 2.8986851273516563, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.294, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, 
"cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1734.02, "cuda_time_us": 55.358999999999995, "pct_cuda_time": 0.816196485160908, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.1, "cuda_time_us": 20.735, "pct_cuda_time": 0.3057106183242369, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.735, "pct_cuda_time": 0.3057106183242369, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 523.278, "cuda_time_us": 3.679, "pct_cuda_time": 0.054242072091384985, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.679, "pct_cuda_time": 0.054242072091384985, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 742.914, "cuda_time_us": 15.072999999999999, "pct_cuda_time": 0.2222317892452965, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.401, "pct_cuda_time": 0.03539962356385304, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.168, "pct_cuda_time": 0.16465764096672666, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, 
None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.022174524714716776, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 179.993, "cuda_time_us": 15.872, "pct_cuda_time": 0.2340120054999898, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.872, "pct_cuda_time": 0.2340120054999898, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.938, "cuda_time_us": 3.071, "pct_cuda_time": 0.04527790252586118, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.071, "pct_cuda_time": 0.04527790252586118, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 439.41, "cuda_time_us": 135.007, "pct_cuda_time": 1.9905026982445264, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 142.445, "cuda_time_us": 81.311, "pct_cuda_time": 1.1988249860893192, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.311, "pct_cuda_time": 1.1988249860893192, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.251, "cuda_time_us": 9.376, "pct_cuda_time": 0.13823693066834075, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.376, "pct_cuda_time": 0.13823693066834075, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", 
"cpu_time_us": 143.8, "cuda_time_us": 44.32, "pct_cuda_time": 0.6534407814868667, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.32, "pct_cuda_time": 0.6534407814868667, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2308.457, "cuda_time_us": 195.004, "pct_cuda_time": 2.87508046374244, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.778, "cuda_time_us": 3.072, "pct_cuda_time": 0.04529264622580448, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04529264622580448, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1660.254, "cuda_time_us": 54.847, "pct_cuda_time": 0.8086477107899408, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 134.129, "cuda_time_us": 20.767, "pct_cuda_time": 0.3061824167224224, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.767, "pct_cuda_time": 0.3061824167224224, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 506.899, "cuda_time_us": 3.616, "pct_cuda_time": 0.05331321899495735, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05331321899495735, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 706.666, "cuda_time_us": 14.655999999999999, "pct_cuda_time": 0.21608366636894216, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.034913081465724284, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, 
cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.008, "pct_cuda_time": 0.16229864897579935, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018871935927418534, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 172.664, "cuda_time_us": 15.808, "pct_cuda_time": 0.23306840870361886, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.808, "pct_cuda_time": 0.23306840870361886, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.761, "cuda_time_us": 3.135, "pct_cuda_time": 0.0462214993222321, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.135, "pct_cuda_time": 0.0462214993222321, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 420.974, "cuda_time_us": 133.95, "pct_cuda_time": 1.9749186074044627, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 
4096])", "cpu_time_us": 138.438, "cuda_time_us": 81.791, "pct_cuda_time": 1.2059019620621008, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.791, "pct_cuda_time": 1.2059019620621008, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.221, "cuda_time_us": 9.088, "pct_cuda_time": 0.13399074508467157, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.088, "pct_cuda_time": 0.13399074508467157, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 134.87, "cuda_time_us": 43.071, "pct_cuda_time": 0.6350259002576902, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.071, "pct_cuda_time": 0.6350259002576902, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2386.649, "cuda_time_us": 196.47600000000003, "pct_cuda_time": 2.8967831900589722, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.304, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1700.229, "cuda_time_us": 56.03, "pct_cuda_time": 0.8260895078228596, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 132.397, "cuda_time_us": 21.759, "pct_cuda_time": 0.3208081670661717, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.759, "pct_cuda_time": 0.3208081670661717, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 511.234, "cuda_time_us": 3.712, "pct_cuda_time": 0.05472861418951375, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05472861418951375, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 
1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 722.138, "cuda_time_us": 15.072, "pct_cuda_time": 0.2222170455453532, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036800275058466135, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.264, "pct_cuda_time": 0.16607303616128305, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019343734325603996, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 190.123, "cuda_time_us": 15.487, "pct_cuda_time": 0.22833568102182092, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", 
"cpu_time_us": 0, "cuda_time_us": 15.487, "pct_cuda_time": 0.22833568102182092, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.521, "cuda_time_us": 3.136, "pct_cuda_time": 0.04623624302217541, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04623624302217541, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 453.546, "cuda_time_us": 134.11, "pct_cuda_time": 1.9772775993953902, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 146.424, "cuda_time_us": 81.695, "pct_cuda_time": 1.2044865668675444, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.695, "pct_cuda_time": 1.2044865668675444, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.681, "cuda_time_us": 8.992, "pct_cuda_time": 0.1325753498901152, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.1325753498901152, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.756, "cuda_time_us": 43.423, "pct_cuda_time": 0.6402156826377304, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.423, "pct_cuda_time": 0.6402156826377304, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2461.139, "cuda_time_us": 195.57999999999998, "pct_cuda_time": 2.8835728349097782, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.741, "cuda_time_us": 3.232, "pct_cuda_time": 0.047651638216731795, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.047651638216731795, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1763.291, "cuda_time_us": 55.422, "pct_cuda_time": 0.8171253382573358, "trace": "" }, "children": [ { "entry": 
{ "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 136.248, "cuda_time_us": 20.575, "pct_cuda_time": 0.3033516263333096, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.575, "pct_cuda_time": 0.3033516263333096, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 485.51, "cuda_time_us": 3.712, "pct_cuda_time": 0.05472861418951375, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05472861418951375, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 804.226, "cuda_time_us": 15.167, "pct_cuda_time": 0.22361769703996628, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.036328476660280676, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.231, "pct_cuda_time": 0.16558649406315434, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void 
at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02170272631653131, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 192.633, "cuda_time_us": 15.968, "pct_cuda_time": 0.2354274006945462, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.968, "pct_cuda_time": 0.2354274006945462, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 82.512, "cuda_time_us": 3.296, "pct_cuda_time": 0.048595235013102714, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.048595235013102714, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 460.807, "cuda_time_us": 133.63, "pct_cuda_time": 1.9702006234226082, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 172.759, "cuda_time_us": 81.246, "pct_cuda_time": 1.1978666455930047, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.246, "pct_cuda_time": 1.1978666455930047, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.783, "cuda_time_us": 8.896, "pct_cuda_time": 0.1311599546955588, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.896, "pct_cuda_time": 0.1311599546955588, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 137.832, "cuda_time_us": 43.488, "pct_cuda_time": 0.6411740231340446, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.488, "pct_cuda_time": 0.6411740231340446, "trace": 
"mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2265.587, "cuda_time_us": 196.512, "pct_cuda_time": 2.8973139632569302, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.325, "cuda_time_us": 3.296, "pct_cuda_time": 0.048595235013102714, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.048595235013102714, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1607.113, "cuda_time_us": 56.289, "pct_cuda_time": 0.8299081261081732, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 134.753, "cuda_time_us": 21.6, "pct_cuda_time": 0.31846391877518776, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.6, "pct_cuda_time": 0.31846391877518776, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 480.194, "cuda_time_us": 3.616, "pct_cuda_time": 0.05331321899495735, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05331321899495735, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 676.672, "cuda_time_us": 15.073, "pct_cuda_time": 0.2222317892452965, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.03538487986390975, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > 
>(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.201, "pct_cuda_time": 0.16514418306485545, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02170272631653131, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 177.024, "cuda_time_us": 16.0, "pct_cuda_time": 0.23589919909273163, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.0, "pct_cuda_time": 0.23589919909273163, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.144, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 429.039, "cuda_time_us": 133.75900000000001, "pct_cuda_time": 1.9721025607152936, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 141.378, "cuda_time_us": 81.343, "pct_cuda_time": 1.1992967844875044, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.343, "pct_cuda_time": 
1.1992967844875044, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.078, "cuda_time_us": 8.864, "pct_cuda_time": 0.13068815629737335, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.864, "pct_cuda_time": 0.13068815629737335, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 138.562, "cuda_time_us": 43.552, "pct_cuda_time": 0.6421176199304155, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.552, "pct_cuda_time": 0.6421176199304155, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2350.854, "cuda_time_us": 196.095, "pct_cuda_time": 2.891165840380576, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.79, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1680.112, "cuda_time_us": 55.681, "pct_cuda_time": 0.8209439565426494, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.73, "cuda_time_us": 20.897, "pct_cuda_time": 0.3080990977150508, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.897, "pct_cuda_time": 0.3080990977150508, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 521.677, "cuda_time_us": 4.032, "pct_cuda_time": 0.059446598171368374, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 4.032, "pct_cuda_time": 0.059446598171368374, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 707.291, "cuda_time_us": 14.879999999999999, "pct_cuda_time": 0.2193862551562404, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, 
(vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.03538487986390975, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.2, "pct_cuda_time": 0.16512943936491215, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018871935927418534, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 169.887, "cuda_time_us": 15.872, "pct_cuda_time": 0.2340120054999898, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.872, "pct_cuda_time": 0.2340120054999898, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": 
"RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.008, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 434.184, "cuda_time_us": 134.046, "pct_cuda_time": 1.976334002599019, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 137.239, "cuda_time_us": 81.439, "pct_cuda_time": 1.2007121796820608, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.439, "pct_cuda_time": 1.2007121796820608, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.402, "cuda_time_us": 9.12, "pct_cuda_time": 0.13446254348285702, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.12, "pct_cuda_time": 0.13446254348285702, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 148.532, "cuda_time_us": 43.487, "pct_cuda_time": 0.6411592794341013, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.487, "pct_cuda_time": 0.6411592794341013, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2229.308, "cuda_time_us": 197.372, "pct_cuda_time": 2.909993545208165, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.131, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1591.53, "cuda_time_us": 56.223, "pct_cuda_time": 0.8289350419119157, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 133.735, "cuda_time_us": 21.663, "pct_cuda_time": 0.31939277187161536, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.663, "pct_cuda_time": 0.31939277187161536, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 482.978, "cuda_time_us": 3.776, "pct_cuda_time": 0.055672210985884665, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.055672210985884665, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 672.217, "cuda_time_us": 15.072, "pct_cuda_time": 0.2222170455453532, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.0372720734566516, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.264, "pct_cuda_time": 0.16607303616128305, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018871935927418534, "trace": 
"fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 165.521, "cuda_time_us": 15.712, "pct_cuda_time": 0.23165301350906248, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.712, "pct_cuda_time": 0.23165301350906248, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.218, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 428.334, "cuda_time_us": 134.877, "pct_cuda_time": 1.988586017251898, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 147.456, "cuda_time_us": 81.246, "pct_cuda_time": 1.1978666455930047, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.246, "pct_cuda_time": 1.1978666455930047, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.616, "cuda_time_us": 9.152, "pct_cuda_time": 0.1349343418810425, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.152, "pct_cuda_time": 0.1349343418810425, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 134.565, "cuda_time_us": 44.479, "pct_cuda_time": 0.6557850297778507, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.479, "pct_cuda_time": 0.6557850297778507, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": 
"LlamaDecoderLayer", "cpu_time_us": 2522.486, "cuda_time_us": 196.38, "pct_cuda_time": 2.895367794864415, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.372, "cuda_time_us": 3.136, "pct_cuda_time": 0.04623624302217541, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04623624302217541, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1851.752, "cuda_time_us": 55.486999999999995, "pct_cuda_time": 0.81808367875365, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 259.103, "cuda_time_us": 20.767, "pct_cuda_time": 0.3061824167224224, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.767, "pct_cuda_time": 0.3061824167224224, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 543.472, "cuda_time_us": 3.52, "pct_cuda_time": 0.05189782380040096, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.52, "pct_cuda_time": 0.05189782380040096, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 721.93, "cuda_time_us": 15.296, "pct_cuda_time": 0.22551963433265146, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.656, "pct_cuda_time": 0.03915926704939345, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, 
flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.168, "pct_cuda_time": 0.16465764096672666, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02170272631653131, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 179.654, "cuda_time_us": 15.904, "pct_cuda_time": 0.23448380389817525, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.904, "pct_cuda_time": 0.23448380389817525, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 84.082, "cuda_time_us": 3.135, "pct_cuda_time": 0.0462214993222321, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.135, "pct_cuda_time": 0.0462214993222321, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 447.255, "cuda_time_us": 134.622, "pct_cuda_time": 1.9848263737663578, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 150.555, "cuda_time_us": 81.599, "pct_cuda_time": 1.203071171672988, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.599, "pct_cuda_time": 1.203071171672988, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": 
"SiluAndMul", "cpu_time_us": 99.1, "cuda_time_us": 9.408, "pct_cuda_time": 0.1387087290665262, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.408, "pct_cuda_time": 0.1387087290665262, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 140.745, "cuda_time_us": 43.615, "pct_cuda_time": 0.6430464730268431, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.615, "pct_cuda_time": 0.6430464730268431, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2259.027, "cuda_time_us": 197.34, "pct_cuda_time": 2.909521746809979, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.988, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1591.168, "cuda_time_us": 56.255, "pct_cuda_time": 0.8294068403101013, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 141.344, "cuda_time_us": 21.632, "pct_cuda_time": 0.3189357171733732, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.632, "pct_cuda_time": 0.3189357171733732, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 466.175, "cuda_time_us": 3.648, "pct_cuda_time": 0.053785017393142814, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.053785017393142814, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 684.193, "cuda_time_us": 14.847000000000001, "pct_cuda_time": 0.21889971305811168, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.432, "pct_cuda_time": 
0.03585667826209521, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.912, "pct_cuda_time": 0.160883253781243, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.503, "pct_cuda_time": 0.022159781014773478, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 163.239, "cuda_time_us": 16.128, "pct_cuda_time": 0.2377863926854735, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.128, "pct_cuda_time": 0.2377863926854735, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.942, "cuda_time_us": 3.071, "pct_cuda_time": 0.04527790252586118, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type 
vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.071, "pct_cuda_time": 0.04527790252586118, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 443.712, "cuda_time_us": 134.814, "pct_cuda_time": 1.9876571641554701, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 135.741, "cuda_time_us": 81.823, "pct_cuda_time": 1.2063737604602862, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.823, "pct_cuda_time": 1.2063737604602862, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 119.465, "cuda_time_us": 8.992, "pct_cuda_time": 0.1325753498901152, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.1325753498901152, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.087, "cuda_time_us": 43.999, "pct_cuda_time": 0.6487080538050688, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.999, "pct_cuda_time": 0.6487080538050688, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2430.766, "cuda_time_us": 194.813, "pct_cuda_time": 2.8722644170532705, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 74.01, "cuda_time_us": 3.039, "pct_cuda_time": 0.044806104127675724, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.039, "pct_cuda_time": 0.044806104127675724, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1778.664, "cuda_time_us": 55.295, "pct_cuda_time": 0.8152528883645374, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 142.53, "cuda_time_us": 20.608, "pct_cuda_time": 0.30383816843143835, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.608, "pct_cuda_time": 0.30383816843143835, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], 
bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 493.638, "cuda_time_us": 3.648, "pct_cuda_time": 0.053785017393142814, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.053785017393142814, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 723.672, "cuda_time_us": 14.879, "pct_cuda_time": 0.21937151145629713, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.03538487986390975, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.199, "pct_cuda_time": 0.16511469566496886, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018871935927418534, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, 
True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 179.069, "cuda_time_us": 16.16, "pct_cuda_time": 0.23825819108365898, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.16, "pct_cuda_time": 0.23825819108365898, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.334, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 423.708, "cuda_time_us": 133.375, "pct_cuda_time": 1.966440979937068, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 137.085, "cuda_time_us": 80.991, "pct_cuda_time": 1.1941070021074645, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.991, "pct_cuda_time": 1.1941070021074645, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.028, "cuda_time_us": 8.96, "pct_cuda_time": 0.13210355149192973, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.96, "pct_cuda_time": 0.13210355149192973, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 137.26, "cuda_time_us": 43.424, "pct_cuda_time": 0.6402304263376737, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.424, "pct_cuda_time": 0.6402304263376737, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2520.604, "cuda_time_us": 196.957, "pct_cuda_time": 2.9038749097316967, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.006, "cuda_time_us": 3.232, "pct_cuda_time": 0.047651638216731795, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.047651638216731795, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1868.239, "cuda_time_us": 56.287, "pct_cuda_time": 0.8298786387082866, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 132.651, "cuda_time_us": 21.824, "pct_cuda_time": 0.321766507562486, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.824, "pct_cuda_time": 0.321766507562486, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 472.183, "cuda_time_us": 3.808, "pct_cuda_time": 0.05614400938407013, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05614400938407013, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 941.1, "cuda_time_us": 15.039, "pct_cuda_time": 0.22173050344722448, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036800275058466135, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.231, "pct_cuda_time": 
0.16558649406315434, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019343734325603996, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 184.977, "cuda_time_us": 15.616, "pct_cuda_time": 0.2302376183145061, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.616, "pct_cuda_time": 0.2302376183145061, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.414, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 430.679, "cuda_time_us": 134.27, "pct_cuda_time": 1.9796365913863176, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 141.363, "cuda_time_us": 80.863, "pct_cuda_time": 1.1922198085147224, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.863, "pct_cuda_time": 1.1922198085147224, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 102.037, "cuda_time_us": 8.768, "pct_cuda_time": 0.12927276110281694, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.768, "pct_cuda_time": 
0.12927276110281694, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 134.348, "cuda_time_us": 44.639, "pct_cuda_time": 0.6581440217687781, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.639, "pct_cuda_time": 0.6581440217687781, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2189.48, "cuda_time_us": 196.50900000000001, "pct_cuda_time": 2.8972697321571004, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.032, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1566.33, "cuda_time_us": 55.295, "pct_cuda_time": 0.8152528883645374, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 131.371, "cuda_time_us": 20.768, "pct_cuda_time": 0.30619716042236567, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.768, "pct_cuda_time": 0.30619716042236567, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 458.695, "cuda_time_us": 3.776, "pct_cuda_time": 0.055672210985884665, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.055672210985884665, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 683.961, "cuda_time_us": 15.2, "pct_cuda_time": 0.22410423913809507, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036800275058466135, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], 
bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.232, "pct_cuda_time": 0.1656012377630976, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02170272631653131, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 159.164, "cuda_time_us": 15.551, "pct_cuda_time": 0.22927927781819188, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.551, "pct_cuda_time": 0.22927927781819188, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.682, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": 
"LlamaMLP", "cpu_time_us": 414.307, "cuda_time_us": 134.814, "pct_cuda_time": 1.9876571641554701, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 135.566, "cuda_time_us": 81.887, "pct_cuda_time": 1.2073173572566573, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.887, "pct_cuda_time": 1.2073173572566573, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.248, "cuda_time_us": 9.056, "pct_cuda_time": 0.13351894668648612, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.056, "pct_cuda_time": 0.13351894668648612, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 135.06, "cuda_time_us": 43.871, "pct_cuda_time": 0.6468208602123269, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.871, "pct_cuda_time": 0.6468208602123269, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2404.876, "cuda_time_us": 197.406, "pct_cuda_time": 2.9104948310062366, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 66.543, "cuda_time_us": 3.232, "pct_cuda_time": 0.047651638216731795, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.047651638216731795, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1736.111, "cuda_time_us": 56.127, "pct_cuda_time": 0.8275196467173593, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 166.375, "cuda_time_us": 21.343, "pct_cuda_time": 0.3146747878897607, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.343, "pct_cuda_time": 0.3146747878897607, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 517.69, "cuda_time_us": 3.616, "pct_cuda_time": 0.05331321899495735, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, 
int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05331321899495735, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 730.099, "cuda_time_us": 15.104, "pct_cuda_time": 0.22268884394353866, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.432, "pct_cuda_time": 0.03585667826209521, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.232, "pct_cuda_time": 0.1656012377630976, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.44, "pct_cuda_time": 0.021230927918345847, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 192.063, "cuda_time_us": 16.064, "pct_cuda_time": 
0.2368427958891026, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 16.064, "pct_cuda_time": 0.2368427958891026, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 87.466, "cuda_time_us": 3.328, "pct_cuda_time": 0.04906703341128818, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.328, "pct_cuda_time": 0.04906703341128818, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 442.074, "cuda_time_us": 134.719, "pct_cuda_time": 1.986256512660857, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 144.513, "cuda_time_us": 81.567, "pct_cuda_time": 1.2025993732748026, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.567, "pct_cuda_time": 1.2025993732748026, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 101.256, "cuda_time_us": 9.44, "pct_cuda_time": 0.13918052746471168, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.44, "pct_cuda_time": 0.13918052746471168, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 139.302, "cuda_time_us": 43.712, "pct_cuda_time": 0.644476611921343, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.712, "pct_cuda_time": 0.644476611921343, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2262.544, "cuda_time_us": 195.517, "pct_cuda_time": 2.882643981813351, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.967, "cuda_time_us": 3.072, "pct_cuda_time": 0.04529264622580448, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04529264622580448, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": 
[] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1591.47, "cuda_time_us": 55.104, "pct_cuda_time": 0.8124368416753678, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.078, "cuda_time_us": 20.608, "pct_cuda_time": 0.30383816843143835, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.608, "pct_cuda_time": 0.30383816843143835, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 464.717, "cuda_time_us": 3.808, "pct_cuda_time": 0.05614400938407013, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.808, "pct_cuda_time": 0.05614400938407013, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 680.95, "cuda_time_us": 14.943999999999999, "pct_cuda_time": 0.22032985195261134, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036800275058466135, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.136, "pct_cuda_time": 0.1641858425685412, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019343734325603996, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 166.991, "cuda_time_us": 15.744, "pct_cuda_time": 0.23212481190724796, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.744, "pct_cuda_time": 0.23212481190724796, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.867, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 448.802, "cuda_time_us": 134.237, "pct_cuda_time": 1.9791500492881884, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 138.518, "cuda_time_us": 81.31, "pct_cuda_time": 1.1988102423893758, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.31, "pct_cuda_time": 1.1988102423893758, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 116.703, "cuda_time_us": 9.28, "pct_cuda_time": 0.13682153547378434, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.28, "pct_cuda_time": 0.13682153547378434, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.417, "cuda_time_us": 43.647, "pct_cuda_time": 0.6435182714250286, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.647, "pct_cuda_time": 0.6435182714250286, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2240.725, "cuda_time_us": 196.799, "pct_cuda_time": 2.9015454051406557, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.218, "cuda_time_us": 3.232, "pct_cuda_time": 0.047651638216731795, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.047651638216731795, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1605.082, "cuda_time_us": 56.352000000000004, "pct_cuda_time": 0.830836979204601, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 133.378, "cuda_time_us": 21.504, "pct_cuda_time": 0.31704852358063135, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.504, "pct_cuda_time": 0.31704852358063135, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 475.192, "cuda_time_us": 3.776, "pct_cuda_time": 0.055672210985884665, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.776, "pct_cuda_time": 0.055672210985884665, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 687.953, "cuda_time_us": 15.168, "pct_cuda_time": 0.2236324407399096, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.0372720734566516, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, 
cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.328, "pct_cuda_time": 0.16701663295765398, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019343734325603996, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 175.05, "cuda_time_us": 15.904, "pct_cuda_time": 0.23448380389817525, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.904, "pct_cuda_time": 0.23448380389817525, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.959, "cuda_time_us": 3.136, "pct_cuda_time": 0.04623624302217541, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04623624302217541, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 420.004, "cuda_time_us": 134.079, "pct_cuda_time": 1.976820544697148, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 139.859, "cuda_time_us": 81.663, "pct_cuda_time": 1.204014768469359, "trace": "" }, "children": [ { 
"entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.663, "pct_cuda_time": 1.204014768469359, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 95.247, "cuda_time_us": 8.992, "pct_cuda_time": 0.1325753498901152, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.992, "pct_cuda_time": 0.1325753498901152, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 132.509, "cuda_time_us": 43.424, "pct_cuda_time": 0.6402304263376737, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.424, "pct_cuda_time": 0.6402304263376737, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2313.991, "cuda_time_us": 194.942, "pct_cuda_time": 2.874166354345956, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.666, "cuda_time_us": 3.232, "pct_cuda_time": 0.047651638216731795, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.047651638216731795, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1676.662, "cuda_time_us": 54.815, "pct_cuda_time": 0.8081759123917552, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 132.942, "cuda_time_us": 20.48, "pct_cuda_time": 0.30195097483869654, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.48, "pct_cuda_time": 0.30195097483869654, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 502.284, "cuda_time_us": 3.52, "pct_cuda_time": 0.05189782380040096, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.52, "pct_cuda_time": 0.05189782380040096, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 721.185, "cuda_time_us": 
15.231, "pct_cuda_time": 0.22456129383633724, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.03538487986390975, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.327, "pct_cuda_time": 0.16700188925771073, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.022174524714716776, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 171.228, "cuda_time_us": 15.584, "pct_cuda_time": 0.2297658199163206, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.584, "pct_cuda_time": 0.2297658199163206, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- 
matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 80.151, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 418.294, "cuda_time_us": 133.695, "pct_cuda_time": 1.9711589639189224, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 136.607, "cuda_time_us": 80.447, "pct_cuda_time": 1.1860864293383115, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.447, "pct_cuda_time": 1.1860864293383115, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.83, "cuda_time_us": 8.896, "pct_cuda_time": 0.1311599546955588, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.896, "pct_cuda_time": 0.1311599546955588, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 135.261, "cuda_time_us": 44.352, "pct_cuda_time": 0.6539125798850521, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.352, "pct_cuda_time": 0.6539125798850521, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2265.341, "cuda_time_us": 197.278, "pct_cuda_time": 2.9086076374134944, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.759, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1627.483, "cuda_time_us": 56.031000000000006, "pct_cuda_time": 0.826104251522803, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 133.644, "cuda_time_us": 21.791, "pct_cuda_time": 
0.3212799654643572, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.791, "pct_cuda_time": 0.3212799654643572, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 495.702, "cuda_time_us": 3.84, "pct_cuda_time": 0.05661580778225559, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.84, "pct_cuda_time": 0.05661580778225559, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 703.137, "cuda_time_us": 14.88, "pct_cuda_time": 0.2193862551562404, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.03538487986390975, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 10.976, "pct_cuda_time": 0.16182685057761392, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, 
"cuda_time_us": 1.504, "pct_cuda_time": 0.022174524714716776, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 163.397, "cuda_time_us": 15.52, "pct_cuda_time": 0.2288222231199497, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.52, "pct_cuda_time": 0.2288222231199497, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 77.172, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 425.588, "cuda_time_us": 134.975, "pct_cuda_time": 1.9900308998463407, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 135.942, "cuda_time_us": 81.823, "pct_cuda_time": 1.2063737604602862, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.823, "pct_cuda_time": 1.2063737604602862, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.419, "cuda_time_us": 9.312, "pct_cuda_time": 0.13729333387196982, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.312, "pct_cuda_time": 0.13729333387196982, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 145.966, "cuda_time_us": 43.84, "pct_cuda_time": 0.6463638055140848, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.84, "pct_cuda_time": 0.6463638055140848, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], 
None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2607.789, "cuda_time_us": 196.31799999999998, "pct_cuda_time": 2.894453685467931, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 65.311, "cuda_time_us": 3.137, "pct_cuda_time": 0.0462509867221187, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.137, "pct_cuda_time": 0.0462509867221187, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1765.628, "cuda_time_us": 55.391000000000005, "pct_cuda_time": 0.8166682835590938, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 130.124, "cuda_time_us": 21.024, "pct_cuda_time": 0.3099715476078494, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.024, "pct_cuda_time": 0.3099715476078494, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 507.092, "cuda_time_us": 3.648, "pct_cuda_time": 0.053785017393142814, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.053785017393142814, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 796.364, "cuda_time_us": 14.911999999999999, "pct_cuda_time": 0.2198580535544259, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.368, "pct_cuda_time": 0.034913081465724284, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, 
cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.264, "pct_cuda_time": 0.16607303616128305, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018871935927418534, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 197.009, "cuda_time_us": 15.807, "pct_cuda_time": 0.2330536650036756, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.807, "pct_cuda_time": 0.2330536650036756, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.676, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 614.523, "cuda_time_us": 134.68599999999998, "pct_cuda_time": 1.985769970562728, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 142.872, "cuda_time_us": 81.695, "pct_cuda_time": 1.2044865668675444, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.695, "pct_cuda_time": 1.2044865668675444, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- 
linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.011, "cuda_time_us": 8.928, "pct_cuda_time": 0.13163175309374428, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.13163175309374428, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 319.831, "cuda_time_us": 44.063, "pct_cuda_time": 0.6496516506014397, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.063, "pct_cuda_time": 0.6496516506014397, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2396.962, "cuda_time_us": 196.509, "pct_cuda_time": 2.8972697321571, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 99.965, "cuda_time_us": 3.072, "pct_cuda_time": 0.04529264622580448, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.072, "pct_cuda_time": 0.04529264622580448, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1692.981, "cuda_time_us": 56.0, "pct_cuda_time": 0.8256471968245608, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 140.54, "cuda_time_us": 21.696, "pct_cuda_time": 0.3198793139697441, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.696, "pct_cuda_time": 0.3198793139697441, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 523.433, "cuda_time_us": 3.68, "pct_cuda_time": 0.05425681579132828, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.68, "pct_cuda_time": 0.05425681579132828, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 716.5, "cuda_time_us": 15.232, "pct_cuda_time": 0.22457603753628053, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float 
const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.0372720734566516, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.392, "pct_cuda_time": 0.16796022975402491, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019343734325603996, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 170.643, "cuda_time_us": 15.392, "pct_cuda_time": 0.22693502952720784, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.392, "pct_cuda_time": 0.22693502952720784, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.358, "cuda_time_us": 3.135, "pct_cuda_time": 0.0462214993222321, "trace": "" }, "children": [ { "entry": { "name": 
"std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.135, "pct_cuda_time": 0.0462214993222321, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 446.189, "cuda_time_us": 134.302, "pct_cuda_time": 1.9801083897845029, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.116, "cuda_time_us": 81.599, "pct_cuda_time": 1.203071171672988, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.599, "pct_cuda_time": 1.203071171672988, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 96.59, "cuda_time_us": 8.8, "pct_cuda_time": 0.12974455950100242, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.8, "pct_cuda_time": 0.12974455950100242, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.731, "cuda_time_us": 43.903, "pct_cuda_time": 0.6472926586105123, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.903, "pct_cuda_time": 0.6472926586105123, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2348.774, "cuda_time_us": 195.73800000000003, "pct_cuda_time": 2.8859023395008196, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.052, "cuda_time_us": 3.231, "pct_cuda_time": 0.04763689451678849, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.231, "pct_cuda_time": 0.04763689451678849, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1642.05, "cuda_time_us": 55.071, "pct_cuda_time": 0.8119502995772391, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.665, "cuda_time_us": 20.576, "pct_cuda_time": 0.3033663700332529, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.576, "pct_cuda_time": 0.3033663700332529, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 
4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 487.547, "cuda_time_us": 3.552, "pct_cuda_time": 0.05236962219858643, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.552, "pct_cuda_time": 0.05236962219858643, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 692.877, "cuda_time_us": 15.231, "pct_cuda_time": 0.22456129383633724, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.036328476660280676, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.263, "pct_cuda_time": 0.1660582924613398, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.022174524714716776, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, 
None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 182.365, "cuda_time_us": 15.712, "pct_cuda_time": 0.23165301350906248, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.712, "pct_cuda_time": 0.23165301350906248, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.897, "cuda_time_us": 3.295, "pct_cuda_time": 0.04858049131315942, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.295, "pct_cuda_time": 0.04858049131315942, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 463.985, "cuda_time_us": 134.14100000000002, "pct_cuda_time": 1.9777346540936325, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 141.88, "cuda_time_us": 81.471, "pct_cuda_time": 1.2011839780802462, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.471, "pct_cuda_time": 1.2011839780802462, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 123.155, "cuda_time_us": 8.991, "pct_cuda_time": 0.13256060619017188, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.991, "pct_cuda_time": 0.13256060619017188, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.947, "cuda_time_us": 43.679, "pct_cuda_time": 0.6439900698232142, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.679, "pct_cuda_time": 0.6439900698232142, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2375.951, "cuda_time_us": 196.606, "pct_cuda_time": 2.8986998710516, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.466, "cuda_time_us": 3.232, "pct_cuda_time": 0.047651638216731795, 
"trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.047651638216731795, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1710.629, "cuda_time_us": 55.617000000000004, "pct_cuda_time": 0.8200003597462785, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 146.94, "cuda_time_us": 21.472, "pct_cuda_time": 0.3165767251824459, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.472, "pct_cuda_time": 0.3165767251824459, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 504.244, "cuda_time_us": 3.585, "pct_cuda_time": 0.052856164296715184, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.585, "pct_cuda_time": 0.052856164296715184, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 725.907, "cuda_time_us": 15.104, "pct_cuda_time": 0.22268884394353866, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.432, "pct_cuda_time": 0.03585667826209521, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", 
"cpu_time_us": 0, "cuda_time_us": 11.2, "pct_cuda_time": 0.16512943936491215, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02170272631653131, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 188.248, "cuda_time_us": 15.456, "pct_cuda_time": 0.22787862632357878, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.456, "pct_cuda_time": 0.22787862632357878, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 83.842, "cuda_time_us": 3.137, "pct_cuda_time": 0.0462509867221187, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.137, "pct_cuda_time": 0.0462509867221187, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 437.744, "cuda_time_us": 134.62, "pct_cuda_time": 1.984796886366471, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 138.562, "cuda_time_us": 81.726, "pct_cuda_time": 1.2049436215657867, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.726, "pct_cuda_time": 1.2049436215657867, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 112.783, "cuda_time_us": 9.247, "pct_cuda_time": 0.1363349933756556, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 
0, "cuda_time_us": 9.247, "pct_cuda_time": 0.1363349933756556, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 134.884, "cuda_time_us": 43.647, "pct_cuda_time": 0.6435182714250286, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.647, "pct_cuda_time": 0.6435182714250286, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2274.36, "cuda_time_us": 195.292, "pct_cuda_time": 2.879326649326109, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.471, "cuda_time_us": 3.04, "pct_cuda_time": 0.04482084782761901, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04482084782761901, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1633.668, "cuda_time_us": 55.199, "pct_cuda_time": 0.8138374931699808, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 137.772, "cuda_time_us": 21.087, "pct_cuda_time": 0.31090040070427705, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.087, "pct_cuda_time": 0.31090040070427705, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 479.763, "cuda_time_us": 3.648, "pct_cuda_time": 0.053785017393142814, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.053785017393142814, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 701.23, "cuda_time_us": 14.815999999999999, "pct_cuda_time": 0.21844265835986948, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.03538487986390975, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 
8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.136, "pct_cuda_time": 0.1641858425685412, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018871935927418534, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 171.191, "cuda_time_us": 15.648, "pct_cuda_time": 0.23070941671269157, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.648, "pct_cuda_time": 0.23070941671269157, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.338, "cuda_time_us": 3.295, "pct_cuda_time": 0.04858049131315942, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.295, "pct_cuda_time": 0.04858049131315942, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] 
} ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 415.363, "cuda_time_us": 133.758, "pct_cuda_time": 1.97208781701535, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 135.085, "cuda_time_us": 81.247, "pct_cuda_time": 1.197881389292948, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.247, "pct_cuda_time": 1.197881389292948, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 93.651, "cuda_time_us": 9.12, "pct_cuda_time": 0.13446254348285702, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.12, "pct_cuda_time": 0.13446254348285702, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 134.326, "cuda_time_us": 43.391, "pct_cuda_time": 0.639743884239545, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.391, "pct_cuda_time": 0.639743884239545, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2392.301, "cuda_time_us": 197.118, "pct_cuda_time": 2.906248645422567, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 67.402, "cuda_time_us": 3.04, "pct_cuda_time": 0.04482084782761901, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.04, "pct_cuda_time": 0.04482084782761901, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1722.617, "cuda_time_us": 56.479, "pct_cuda_time": 0.8327094290973993, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 131.041, "cuda_time_us": 21.856, "pct_cuda_time": 0.3222383059606715, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.856, "pct_cuda_time": 0.3222383059606715, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 542.614, "cuda_time_us": 3.904, "pct_cuda_time": 0.05755940457862652, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, 
c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.904, "pct_cuda_time": 0.05755940457862652, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 714.36, "cuda_time_us": 15.072, "pct_cuda_time": 0.2222170455453532, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.496, "pct_cuda_time": 0.036800275058466135, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.264, "pct_cuda_time": 0.16607303616128305, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019343734325603996, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 192.377, "cuda_time_us": 
15.647, "pct_cuda_time": 0.23069467301274826, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.647, "pct_cuda_time": 0.23069467301274826, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 89.433, "cuda_time_us": 3.232, "pct_cuda_time": 0.047651638216731795, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.232, "pct_cuda_time": 0.047651638216731795, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 439.661, "cuda_time_us": 134.367, "pct_cuda_time": 1.9810667302808167, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 144.819, "cuda_time_us": 81.151, "pct_cuda_time": 1.1964659940983917, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.151, "pct_cuda_time": 1.1964659940983917, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 98.334, "cuda_time_us": 8.896, "pct_cuda_time": 0.1311599546955588, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.896, "pct_cuda_time": 0.1311599546955588, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 142.092, "cuda_time_us": 44.32, "pct_cuda_time": 0.6534407814868667, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.32, "pct_cuda_time": 0.6534407814868667, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2254.212, "cuda_time_us": 196.02800000000002, "pct_cuda_time": 2.890178012484375, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.312, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.168, "pct_cuda_time": 0.04670804142036087, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], 
bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1584.383, "cuda_time_us": 55.261, "pct_cuda_time": 0.8147516025664653, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 139.862, "cuda_time_us": 20.671, "pct_cuda_time": 0.304767021527866, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.671, "pct_cuda_time": 0.304767021527866, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 458.819, "cuda_time_us": 3.648, "pct_cuda_time": 0.053785017393142814, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.053785017393142814, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 681.537, "cuda_time_us": 15.135, "pct_cuda_time": 0.22314589864178086, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.432, "pct_cuda_time": 0.03585667826209521, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.199, "pct_cuda_time": 0.16511469566496886, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- 
vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.022174524714716776, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 165.096, "cuda_time_us": 15.807, "pct_cuda_time": 0.2330536650036756, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.807, "pct_cuda_time": 0.2330536650036756, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 81.025, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 443.759, "cuda_time_us": 134.495, "pct_cuda_time": 1.982953923873559, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 159.896, "cuda_time_us": 80.863, "pct_cuda_time": 1.1922198085147224, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.863, "pct_cuda_time": 1.1922198085147224, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.975, "cuda_time_us": 9.056, "pct_cuda_time": 0.13351894668648612, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.056, "pct_cuda_time": 0.13351894668648612, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 135.615, "cuda_time_us": 44.576, "pct_cuda_time": 0.6572151686723504, "trace": "" }, "children": [ { "entry": { "name": 
"sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.576, "pct_cuda_time": 0.6572151686723504, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2181.419, "cuda_time_us": 197.85399999999998, "pct_cuda_time": 2.917100008580833, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 72.86, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.2, "pct_cuda_time": 0.047179839818546336, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1552.382, "cuda_time_us": 56.19199999999999, "pct_cuda_time": 0.8284779872136735, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 133.332, "cuda_time_us": 21.888, "pct_cuda_time": 0.32271010435885694, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.888, "pct_cuda_time": 0.32271010435885694, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 448.215, "cuda_time_us": 3.616, "pct_cuda_time": 0.05331321899495735, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05331321899495735, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 672.289, "cuda_time_us": 15.168, "pct_cuda_time": 0.2236324407399096, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.432, "pct_cuda_time": 0.03585667826209521, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, 
cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.232, "pct_cuda_time": 0.1656012377630976, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.022174524714716776, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 164.816, "cuda_time_us": 15.52, "pct_cuda_time": 0.2288222231199497, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.52, "pct_cuda_time": 0.2288222231199497, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.216, "cuda_time_us": 3.167, "pct_cuda_time": 0.04669329772041757, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.167, "pct_cuda_time": 0.04669329772041757, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 412.353, "cuda_time_us": 135.295, "pct_cuda_time": 1.9947488838281953, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 136.54, "cuda_time_us": 81.823, "pct_cuda_time": 1.2063737604602862, "trace": "" }, "children": [ { 
"entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.823, "pct_cuda_time": 1.2063737604602862, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 94.55, "cuda_time_us": 9.024, "pct_cuda_time": 0.13304714828830064, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.024, "pct_cuda_time": 0.13304714828830064, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 133.462, "cuda_time_us": 44.448, "pct_cuda_time": 0.6553279750796085, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.448, "pct_cuda_time": 0.6553279750796085, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2560.417, "cuda_time_us": 195.93200000000002, "pct_cuda_time": 2.8887626172898186, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 69.944, "cuda_time_us": 3.008, "pct_cuda_time": 0.04434904942943355, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.008, "pct_cuda_time": 0.04434904942943355, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1910.241, "cuda_time_us": 55.519000000000005, "pct_cuda_time": 0.8185554771518355, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 138.93, "cuda_time_us": 20.704, "pct_cuda_time": 0.30525356362599476, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.704, "pct_cuda_time": 0.30525356362599476, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 497.848, "cuda_time_us": 3.679, "pct_cuda_time": 0.054242072091384985, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.679, "pct_cuda_time": 0.054242072091384985, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", 
"cpu_time_us": 941.399, "cuda_time_us": 15.424, "pct_cuda_time": 0.2274068279253933, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.688, "pct_cuda_time": 0.03963106544757892, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.232, "pct_cuda_time": 0.1656012377630976, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.504, "pct_cuda_time": 0.022174524714716776, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 189.944, "cuda_time_us": 15.712, "pct_cuda_time": 0.23165301350906248, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.712, "pct_cuda_time": 0.23165301350906248, "trace": 
"mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 79.236, "cuda_time_us": 3.136, "pct_cuda_time": 0.04623624302217541, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04623624302217541, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 429.771, "cuda_time_us": 134.269, "pct_cuda_time": 1.979621847686374, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 140.816, "cuda_time_us": 81.022, "pct_cuda_time": 1.1945640568057065, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.022, "pct_cuda_time": 1.1945640568057065, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.373, "cuda_time_us": 9.152, "pct_cuda_time": 0.1349343418810425, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.152, "pct_cuda_time": 0.1349343418810425, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.416, "cuda_time_us": 44.095, "pct_cuda_time": 0.650123448999625, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.095, "pct_cuda_time": 0.650123448999625, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2218.781, "cuda_time_us": 196.637, "pct_cuda_time": 2.899156925749842, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.85, "cuda_time_us": 3.296, "pct_cuda_time": 0.048595235013102714, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.296, "pct_cuda_time": 0.048595235013102714, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1586.752, "cuda_time_us": 56.702999999999996, "pct_cuda_time": 0.8360120178846976, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 132.988, 
"cuda_time_us": 22.4, "pct_cuda_time": 0.3302588787298243, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 22.4, "pct_cuda_time": 0.3302588787298243, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 473.047, "cuda_time_us": 3.712, "pct_cuda_time": 0.05472861418951375, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.712, "pct_cuda_time": 0.05472861418951375, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 680.317, "cuda_time_us": 15.104, "pct_cuda_time": 0.22268884394353866, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.528, "pct_cuda_time": 0.0372720734566516, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.264, "pct_cuda_time": 0.16607303616128305, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, 
at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.312, "pct_cuda_time": 0.019343734325603996, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 164.175, "cuda_time_us": 15.487, "pct_cuda_time": 0.22833568102182092, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.487, "pct_cuda_time": 0.22833568102182092, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 75.713, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 419.956, "cuda_time_us": 133.534, "pct_cuda_time": 1.9687852282280518, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 134.032, "cuda_time_us": 81.503, "pct_cuda_time": 1.201655776478432, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.503, "pct_cuda_time": 1.201655776478432, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 92.555, "cuda_time_us": 8.799, "pct_cuda_time": 0.1297298158010591, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.799, "pct_cuda_time": 0.1297298158010591, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 144.955, "cuda_time_us": 43.232, "pct_cuda_time": 0.6373996359485609, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.232, "pct_cuda_time": 0.6373996359485609, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- 
linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2366.551, "cuda_time_us": 196.477, "pct_cuda_time": 2.896797933758915, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 65.178, "cuda_time_us": 3.423, "pct_cuda_time": 0.050467684905901276, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.423, "pct_cuda_time": 0.050467684905901276, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1717.631, "cuda_time_us": 55.36, "pct_cuda_time": 0.8162112288608515, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 128.078, "cuda_time_us": 20.736, "pct_cuda_time": 0.3057253620241802, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.736, "pct_cuda_time": 0.3057253620241802, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 495.661, "cuda_time_us": 3.52, "pct_cuda_time": 0.05189782380040096, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.52, "pct_cuda_time": 0.05189782380040096, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 695.672, "cuda_time_us": 15.232, "pct_cuda_time": 0.22457603753628053, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.464, "pct_cuda_time": 0.036328476660280676, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, 
cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.296, "pct_cuda_time": 0.16654483455946853, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02170272631653131, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 267.213, "cuda_time_us": 15.872, "pct_cuda_time": 0.2340120054999898, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.872, "pct_cuda_time": 0.2340120054999898, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 85.381, "cuda_time_us": 3.264, "pct_cuda_time": 0.048123436614917255, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.264, "pct_cuda_time": 0.048123436614917255, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 429.1, "cuda_time_us": 134.43, "pct_cuda_time": 1.9819955833772447, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 141.345, "cuda_time_us": 81.886, "pct_cuda_time": 1.207302613556714, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.886, "pct_cuda_time": 1.207302613556714, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 
28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 100.266, "cuda_time_us": 8.96, "pct_cuda_time": 0.13210355149192973, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 8.96, "pct_cuda_time": 0.13210355149192973, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 137.713, "cuda_time_us": 43.584, "pct_cuda_time": 0.6425894183286011, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 43.584, "pct_cuda_time": 0.6425894183286011, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2313.215, "cuda_time_us": 197.72500000000002, "pct_cuda_time": 2.915198071288148, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 68.098, "cuda_time_us": 3.136, "pct_cuda_time": 0.04623624302217541, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.136, "pct_cuda_time": 0.04623624302217541, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1663.353, "cuda_time_us": 55.902, "pct_cuda_time": 0.8242023142301179, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 135.734, "cuda_time_us": 21.472, "pct_cuda_time": 0.3165767251824459, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 21.472, "pct_cuda_time": 0.3165767251824459, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 507.937, "cuda_time_us": 3.616, "pct_cuda_time": 0.05331321899495735, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.616, "pct_cuda_time": 0.05331321899495735, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 710.776, "cuda_time_us": 15.197999999999999, "pct_cuda_time": 0.22407475173820846, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, 
int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.431, "pct_cuda_time": 0.035841934562151914, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.295, "pct_cuda_time": 0.16653009085952525, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.472, "pct_cuda_time": 0.02170272631653131, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 169.443, "cuda_time_us": 15.616, "pct_cuda_time": 0.2302376183145061, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.616, "pct_cuda_time": 0.2302376183145061, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 76.657, "cuda_time_us": 3.328, "pct_cuda_time": 0.04906703341128818, "trace": "" }, 
"children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.328, "pct_cuda_time": 0.04906703341128818, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 433.802, "cuda_time_us": 135.359, "pct_cuda_time": 1.9956924806245664, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 136.306, "cuda_time_us": 81.151, "pct_cuda_time": 1.1964659940983917, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 81.151, "pct_cuda_time": 1.1964659940983917, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 112.217, "cuda_time_us": 9.248, "pct_cuda_time": 0.1363497370755989, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.248, "pct_cuda_time": 0.1363497370755989, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 136.444, "cuda_time_us": 44.96, "pct_cuda_time": 0.662876749450576, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.96, "pct_cuda_time": 0.662876749450576, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "LlamaDecoderLayer", "cpu_time_us": 2353.317, "cuda_time_us": 195.676, "pct_cuda_time": 2.8849882301043346, "trace": "" }, "children": [ { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 71.518, "cuda_time_us": 3.103, "pct_cuda_time": 0.04574970092404665, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.103, "pct_cuda_time": 0.04574970092404665, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaAttention", "cpu_time_us": 1687.419, "cuda_time_us": 55.327999999999996, "pct_cuda_time": 0.815739430462666, "trace": "" }, "children": [ { "entry": { "name": "QKVParallelLinear(weight=bfloat16[6144, 4096])", "cpu_time_us": 130.437, "cuda_time_us": 20.992, "pct_cuda_time": 0.30949974920966394, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 20.992, "pct_cuda_time": 0.30949974920966394, "trace": "mm(bfloat16[12, 4096], 
bfloat16[4096, 6144]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 6144]) <- linear(bfloat16[12, 4096], bfloat16[6144, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Llama3RotaryEmbedding", "cpu_time_us": 502.806, "cuda_time_us": 3.648, "pct_cuda_time": 0.053785017393142814, "trace": "" }, "children": [ { "entry": { "name": "void vllm::rotary_embedding_kernel(long const*, c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, int, long, long, int, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.648, "pct_cuda_time": 0.053785017393142814, "trace": "_C::rotary_embedding(int64[12], bfloat16[12, 4096], bfloat16[12, 1024], 128, bfloat16[131072, 128], True)" }, "children": [] } ] }, { "entry": { "name": "Attention", "cpu_time_us": 713.763, "cuda_time_us": 14.751999999999999, "pct_cuda_time": 0.21749906156349857, "trace": "" }, "children": [ { "entry": { "name": "void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0>(__nv_bfloat16 const*, __nv_bfloat16 const*, __nv_bfloat16*, __nv_bfloat16*, long const*, int, int, int, int, int, int, float const*, float const*)", "cpu_time_us": 0, "cuda_time_us": 2.4, "pct_cuda_time": 0.03538487986390975, "trace": "_C_cache_ops::reshape_and_cache_flash(bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], int64[12], None, float32[], float32[]) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void cutlass::device_kernel, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > > >(flash::enable_sm90_or_later, cute::C<1>, cute::C<1> >, cute::tuple, cute::C<128>, cute::C<128> >, cutlass::bfloat16_t, float, cutlass::arch::Sm90, true, false, false, true, true, false, true, true, true, false, false>, flash::CollectiveEpilogueFwd, cute::C<128>, cute::C<128> >, cute::tuple, cute::C<1>, cute::C<1> >, cutlass::bfloat16_t, cutlass::arch::Sm90, 256, true, true, false>, flash::VarlenDynamicPersistentTileScheduler<128, 256, 128, false, true, true> > >::Params)", "cpu_time_us": 0, "cuda_time_us": 11.072, "pct_cuda_time": 0.16324224577217028, "trace": "_vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] }, { "entry": { "name": "void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor, at::detail::Array >(int, at::native::FillFunctor, at::detail::Array)", "cpu_time_us": 0, "cuda_time_us": 1.28, "pct_cuda_time": 0.018871935927418534, "trace": "fill_(int32[1], 0) <- zero_(int32[1]) <- zeros(None, 3, 0, None, None) <- _vllm_fa3_C::fwd(bfloat16[12, 1, 32, 128], bfloat16[28102, 16, 8, 128], bfloat16[28102, 16, 8, 128], None, None, bfloat16[12, 1, 32, 128], None, 
None, None, None, int32[12], None, None, int32[12, 9], None, None, None, None, None, None, None, 0.08838834764831845, True, -1, -1, 0, 0.0, True, 0, None, 0) <- vllm::unified_attention_with_output(bfloat16[12, 32, 128], bfloat16[12, 8, 128], bfloat16[12, 8, 128], bfloat16[12, 32, 128], None)" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 4096])", "cpu_time_us": 189.541, "cuda_time_us": 15.936, "pct_cuda_time": 0.23495560229636073, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 15.936, "pct_cuda_time": 0.23495560229636073, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 4096]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 4096]) <- linear(bfloat16[12, 4096], bfloat16[4096, 4096], None)" }, "children": [] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 86.565, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.104, "pct_cuda_time": 0.045764444623989944, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] }, { "entry": { "name": "LlamaMLP", "cpu_time_us": 433.34, "cuda_time_us": 134.141, "pct_cuda_time": 1.977734654093632, "trace": "" }, "children": [ { "entry": { "name": "MergedColumnParallelLinear(weight=bfloat16[28672, 4096])", "cpu_time_us": 142.52, "cuda_time_us": 80.83, "pct_cuda_time": 1.1917332664165936, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 80.83, "pct_cuda_time": 1.1917332664165936, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 28672]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 28672]) <- linear(bfloat16[12, 4096], bfloat16[28672, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "SiluAndMul", "cpu_time_us": 97.026, "cuda_time_us": 9.216, "pct_cuda_time": 0.13587793867741343, "trace": "" }, "children": [ { "entry": { "name": "void vllm::act_and_mul_kernel(c10::BFloat16 const&)), true>(c10::BFloat16*, c10::BFloat16 const*, int)", "cpu_time_us": 0, "cuda_time_us": 9.216, "pct_cuda_time": 0.13587793867741343, "trace": "_C::silu_and_mul(bfloat16[12, 14336], bfloat16[12, 28672])" }, "children": [] } ] }, { "entry": { "name": "RowParallelLinear(weight=bfloat16[4096, 14336])", "cpu_time_us": 137.772, "cuda_time_us": 44.095, "pct_cuda_time": 0.650123448999625, "trace": "" }, "children": [ { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 44.095, "pct_cuda_time": 0.650123448999625, "trace": "mm(bfloat16[12, 14336], bfloat16[14336, 4096]) <- matmul(bfloat16[12, 14336], bfloat16[14336, 4096]) <- linear(bfloat16[12, 14336], bfloat16[4096, 14336], None)" }, "children": [] } ] } ] } ] }, { "entry": { "name": "RMSNorm(weight=bfloat16[4096])", "cpu_time_us": 73.038, "cuda_time_us": 3.073, "pct_cuda_time": 0.04530738992574777, "trace": "" }, "children": [ { "entry": { "name": "std::enable_if<(((8)>(0)))&&vllm::_typeConvert::exists, 
void>::type vllm::fused_add_rms_norm_kernel(c10::BFloat16*, c10::BFloat16*, c10::BFloat16 const*, float, int, int)", "cpu_time_us": 0, "cuda_time_us": 3.073, "pct_cuda_time": 0.04530738992574777, "trace": "_C::fused_add_rms_norm(bfloat16[12, 4096], bfloat16[12, 4096], bfloat16[4096], 1e-05)" }, "children": [] } ] } ] }, { "entry": { "name": "LogitsProcessor", "cpu_time_us": 485.092, "cuda_time_us": 351.38800000000003, "pct_cuda_time": 5.1807592356748, "trace": "" }, "children": [ { "entry": { "name": "void at::native::(anonymous namespace)::indexSelectSmallIndex(at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, at::cuda::detail::TensorInfo, int, int, unsigned int, long)", "cpu_time_us": 0, "cuda_time_us": 7.231, "pct_cuda_time": 0.1066116942899714, "trace": "index_select(bfloat16[12, 4096], 0, int64[12])" }, "children": [] }, { "entry": { "name": "Memset (Device)", "cpu_time_us": 0, "cuda_time_us": 0.737, "pct_cuda_time": 0.010866106858208952, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[12, 4096], bfloat16[128256, 4096], None)" }, "children": [] }, { "entry": { "name": "sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off_kernel__5x_cublas", "cpu_time_us": 0, "cuda_time_us": 343.42, "pct_cuda_time": 5.063281434526619, "trace": "mm(bfloat16[12, 4096], bfloat16[4096, 128256]) <- matmul(bfloat16[12, 4096], bfloat16[4096, 128256]) <- linear(bfloat16[12, 4096], bfloat16[128256, 4096], None)" }, "children": [] } ] }, { "entry": { "name": "Sampler", "cpu_time_us": 3941.07, "cuda_time_us": 124.896, "pct_cuda_time": 1.841429148117863, "trace": "" }, "children": [ { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010851363158265655, "trace": "copy_(bfloat16[12], bfloat16[12], True) <- _to_copy(bfloat16[12], 15, 0, None, None, True, None) <- to(bfloat16[12], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.736, "pct_cuda_time": 0.010851363158265655, "trace": "copy_(bfloat16[12], bfloat16[12], True) <- _to_copy(bfloat16[12], 15, 0, None, None, True, None) <- to(bfloat16[12], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.011794959954636584, "trace": "copy_(int32[12], int32[12], True) <- _to_copy(int32[12], 3, 0, None, None, True, None) <- to(int32[12], 3, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.011794959954636584, "trace": "copy_(bfloat16[12], bfloat16[12], True) <- _to_copy(bfloat16[12], 15, 0, None, None, True, None) <- to(bfloat16[12], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.011794959954636584, "trace": "copy_(bfloat16[12], bfloat16[12], True) <- _to_copy(bfloat16[12], 15, 0, None, None, True, None) <- to(bfloat16[12], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.769, "pct_cuda_time": 0.011337905256394415, "trace": "copy_(bfloat16[12], bfloat16[12], True) <- _to_copy(bfloat16[12], 15, 0, None, None, True, 
None) <- to(bfloat16[12], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "Memcpy HtoD (Pinned -> Device)", "cpu_time_us": 0, "cuda_time_us": 0.8, "pct_cuda_time": 0.011794959954636584, "trace": "copy_(bfloat16[12], bfloat16[12], True) <- _to_copy(bfloat16[12], 15, 0, None, None, True, None) <- to(bfloat16[12], 15, 0, None, None, True, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#7}::operator()() const::{lambda(float)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 6.784, "pct_cuda_time": 0.10002126041531822, "trace": "copy_(float32[12, 128256], bfloat16[12, 128256], False) <- _to_copy(bfloat16[12, 128256], 6, None, None, None, False, None) <- to(bfloat16[12, 128256], 6, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::elementwise_kernel<128, 4, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl > >(at::TensorIteratorBase&, at::native::BinaryFunctor > const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 8.928, "pct_cuda_time": 0.13163175309374428, "trace": "div_(float32[12, 128256], bfloat16[12, 1])" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::SoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 34.816, "pct_cuda_time": 0.5133166572257841, "trace": "_softmax(float32[12, 128256], -1, False) <- softmax(float32[12, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::(anonymous namespace)::cunn_SoftMaxForward<4, float, float, float, at::native::(anonymous namespace)::LogSoftMaxForwardEpilogue>(float*, float const*, int)", "cpu_time_us": 0, "cuda_time_us": 28.192, "pct_cuda_time": 0.41565438880139316, "trace": "_log_softmax(float32[12, 128256], -1, False) <- log_softmax(float32[12, 128256], -1, 6)" }, "children": [] }, { "entry": { "name": "void at::native::unrolled_elementwise_kernel, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1> >(int, at::native::direct_copy_kernel_cuda(at::TensorIteratorBase&)::{lambda()#3}::operator()() const::{lambda()#4}::operator()() const::{lambda(long)#1}, at::detail::Array, TrivialOffsetCalculator<1, unsigned int>, TrivialOffsetCalculator<1, unsigned int>, at::native::memory::LoadWithCast<1>, at::native::memory::StoreWithCast<1>)", "cpu_time_us": 0, "cuda_time_us": 1.824, "pct_cuda_time": 0.026892508696571407, "trace": "copy_(int64[12], int32[12], False) <- _to_copy(int32[12], 4, None, None, None, False, None) <- to(int32[12], 4, False, False, None)" }, "children": [] }, { "entry": { "name": "void at::native::index_elementwise_kernel<128, 4, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, 
at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1}>(long, at::native::gpu_index_kernel >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1}>(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef, at::native::index_kernel_impl >(at::TensorIteratorBase&, c10::ArrayRef, c10::ArrayRef)::{lambda(char*, char const*, long)#1} const&)::{lambda(int)#1})", "cpu_time_us": 0, "cuda_time_us": 9.056, "pct_cuda_time": 0.13351894668648612, "trace": "index(float32[12, 128256], None)" }, "children": [] }, { "entry": { "name": "void at::native::reduce_kernel<512, 1, at::native::ReduceOp, unsigned int, long, 4> >(at::native::ReduceOp, unsigned int, long, 4>)", "cpu_time_us": 0, "cuda_time_us": 27.295, "pct_cuda_time": 0.4024292899522569, "trace": "argmax(float32[12, 128256], -1, False)" }, "children": [] }, { "entry": { "name": "Memcpy DtoH (Device -> Pageable)", "cpu_time_us": 0, "cuda_time_us": 2.56, "pct_cuda_time": 0.03774387185483707, "trace": "copy_(int64[12], int64[12], False) <- _to_copy(int64[12], 4, 0, None, None, False, None) <- to(int64[12], 4, 0, None, None, False, False, None)" }, "children": [] } ] } ] } }
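The profile dump ends above. As a minimal sketch of how one might consume this layerwise profile, the snippet below walks the "entry"/"children" tree under "prefill" -> "summary_stats" and prints the leaf kernels with the largest "cuda_time_us". The file name "profile.json", the leaf-only aggregation, and the top-10 cutoff are illustrative assumptions, not part of vLLM's tooling; only the JSON keys are taken from the dump itself.

import json
from collections import defaultdict

def walk(node, acc):
    """Accumulate CUDA time per entry name from the profile tree.

    Only leaf nodes are summed, since parent entries (e.g. LlamaMLP)
    already include their children's kernel times.
    """
    entry = node.get("entry", {})
    children = node.get("children", [])
    if not children:
        acc[entry.get("name", "<unnamed>")] += entry.get("cuda_time_us", 0.0)
    for child in children:
        walk(child, acc)

def summarize(path="profile.json", top=10):
    with open(path) as f:
        profile = json.load(f)
    acc = defaultdict(float)
    for root in profile.get("prefill", {}).get("summary_stats", []):
        walk(root, acc)
    total = sum(acc.values()) or 1.0
    for name, us in sorted(acc.items(), key=lambda kv: kv[1], reverse=True)[:top]:
        print(f"{us:10.3f} us  {100 * us / total:6.2f}%  {name}")

if __name__ == "__main__":
    summarize()

Run against this dump, such a summary would surface the lm_head GEMM under LogitsProcessor (~343 us) and the MLP GEMMs as the dominant CUDA-time consumers for this 12-sequence prefill step.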