vLLM 0.17.1
Version history | Download JSON
-
PyPI https://pypi.org/project/vLLM/ Repository https://github.com/vllm-project/vllm -
py.typed -
Coverage
%%{init: {"pie": {"textPosition": 0.85}, "theme": "neutral", "themeVariables": {"pieStrokeWidth": "1px"}}}%% pie title "Typed" : 49485 "Any" : 333 "Untyped" : 22689 -
Typables
%%{init: {"pie": {"textPosition": 0.85}, "theme": "neutral", "themeVariables": {"pieStrokeWidth": "1px"}}}%% pie title "functions" : 11839 "classes" : 43998 "other" : 1370
- 2755 functions (+43 overloads)
- 9084 parameters
- 3834 classes
- 13408 methods (+20 overloads)
- 29905 parameters
- 676 properties
- 13408 methods (+20 overloads)
- 1264 modules
- 1370 attrs
- 2755 functions (+43 overloads)
Modules
Incomplete Annotations
vllm.assets.audio (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
librosa |
1 | 0 | 0 |
vllm.assets.video (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
librosa |
1 | 0 | 0 |
vllm.beam_search (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BeamSearchSequence.get_prompt |
1 | 0 | 0 |
meth |
BeamSearchInstance.init |
5 | 3 | 0 |
func |
create_sort_beams_key_function |
3 | 2 | 0 |
vllm.collect_env (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_cudnn_version |
2 | 0 | 0 |
func |
get_python_platform |
1 | 0 | 0 |
func |
get_cachingallocator_config |
1 | 0 | 0 |
func |
get_platform |
1 | 0 | 0 |
func |
get_os |
2 | 0 | 0 |
func |
get_cpu_info |
2 | 0 | 0 |
func |
get_gpu_info |
2 | 0 | 0 |
func |
get_nvidia_driver_version |
2 | 0 | 0 |
func |
check_release_file |
2 | 0 | 0 |
func |
get_env_info |
1 | 0 | 0 |
func |
get_lsb_version |
2 | 0 | 0 |
func |
run |
2 | 0 | 0 |
func |
get_windows_version |
2 | 0 | 0 |
func |
get_running_cuda_version |
2 | 0 | 0 |
func |
get_cmake_version |
2 | 0 | 0 |
func |
get_cuda_module_loading_config |
1 | 0 | 0 |
func |
get_clang_version |
2 | 0 | 0 |
func |
is_uv_venv |
1 | 0 | 0 |
func |
get_rocm_version |
2 | 0 | 0 |
func |
get_mac_version |
2 | 0 | 0 |
func |
pretty_str |
2 | 0 | 0 |
func |
get_gpu_topo |
2 | 0 | 0 |
func |
summarize_vllm_build_flags |
1 | 0 | 0 |
func |
get_env_vars |
1 | 0 | 0 |
func |
is_xnnpack_available |
1 | 0 | 0 |
func |
run_and_read_all |
3 | 0 | 0 |
func |
get_vllm_version |
1 | 0 | 0 |
attr |
env_info_fmt |
1 | 0 | 0 |
func |
main |
1 | 0 | 0 |
func |
get_conda_packages |
3 | 0 | 0 |
func |
get_pip_packages |
3 | 0 | 0 |
func |
get_pretty_env_info |
1 | 0 | 0 |
func |
run_and_parse_first_match |
4 | 0 | 0 |
func |
get_libc_version |
1 | 0 | 0 |
func |
get_gcc_version |
2 | 0 | 0 |
func |
get_nvidia_smi |
1 | 0 | 0 |
vllm.compilation.backends (15 missing, 7 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PiecewiseCompileInterpreter.run |
2 | 2 | 2 |
meth |
PiecewiseCompileInterpreter.call_module |
4 | 4 | 1 |
attr |
PiecewiseCompileInterpreter.fake_mode |
1 | 0 | 0 |
attr |
PiecewiseCompileInterpreter.compile_submod_names |
1 | 0 | 0 |
attr |
PiecewiseCompileInterpreter.compilation_config |
1 | 0 | 0 |
attr |
PiecewiseCompileInterpreter.vllm_config |
1 | 0 | 0 |
attr |
PiecewiseCompileInterpreter.vllm_backend |
1 | 0 | 0 |
attr |
PiecewiseCompileInterpreter.extra_traceback |
1 | 0 | 0 |
meth |
VllmBackend._log_compilation_config |
1 | 0 | 0 |
meth |
VllmBackend.call |
3 | 3 | 1 |
attr |
VllmBackend.prefix |
1 | 0 | 0 |
attr |
VllmBackend.is_encoder |
1 | 0 | 0 |
attr |
VllmBackend.pass_manager |
1 | 0 | 0 |
attr |
VllmBackend.pass_key |
1 | 0 | 0 |
func |
wrap_with_cudagraph_if_needed |
6 | 6 | 2 |
attr |
logger |
1 | 0 | 0 |
meth |
CompilerManager.compile |
8 | 8 | 1 |
attr |
CompilerManager.is_cache_updated |
1 | 0 | 0 |
attr |
CompilerManager.compilation_config |
1 | 0 | 0 |
attr |
CompilerManager.compiler |
1 | 0 | 0 |
vllm.compilation.caching (10 missing, 4 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
patch_pytree_map_over_slice |
1 | 0 | 0 |
meth |
VllmSerializableFunction.call |
3 | 3 | 3 |
attr |
VllmSerializableFunction.graph_module |
1 | 0 | 0 |
attr |
VllmSerializableFunction.example_inputs |
1 | 0 | 0 |
attr |
VllmSerializableFunction.prefix |
1 | 0 | 0 |
attr |
VllmSerializableFunction.optimized_call |
1 | 0 | 0 |
attr |
VllmSerializableFunction.is_encoder |
1 | 0 | 0 |
attr |
VllmSerializableFunction.shape_env |
1 | 0 | 0 |
attr |
VllmSerializableFunction.vllm_backend |
1 | 0 | 0 |
attr |
VllmSerializableFunction.sym_tensor_indices |
1 | 0 | 0 |
meth |
StandaloneCompiledArtifacts.get_loaded |
3 | 3 | 1 |
attr |
logger |
1 | 0 | 0 |
vllm.compilation.compiler_interface (2 missing, 9 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompilerInterface.load |
6 | 6 | 1 |
meth |
InductorAdaptor.load |
6 | 6 | 1 |
meth |
InductorStandaloneAdaptor.load |
6 | 6 | 1 |
attr |
InductorStandaloneAdaptor.save_format |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
AlwaysHitShapeEnv.evaluate_guards_expression |
3 | 3 | 2 |
meth |
AlwaysHitShapeEnv.get_pruned_guards |
3 | 3 | 2 |
meth |
AlwaysHitShapeEnv.produce_guards_expression |
3 | 3 | 2 |
vllm.compilation.counter (1 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompilationCounter.expect |
2 | 2 | 1 |
attr |
compilation_counter |
1 | 0 | 0 |
vllm.compilation.cuda_graph (12 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CUDAGraphWrapper.getattr |
2 | 2 | 1 |
meth |
CUDAGraphWrapper.call |
3 | 3 | 2 |
attr |
CUDAGraphWrapper.runnable |
1 | 0 | 0 |
attr |
CUDAGraphWrapper.vllm_config |
1 | 0 | 0 |
attr |
CUDAGraphWrapper.runtime_mode |
1 | 0 | 0 |
attr |
CUDAGraphWrapper.compilation_config |
1 | 0 | 0 |
attr |
CUDAGraphWrapper.first_run_finished |
1 | 0 | 0 |
attr |
CUDAGraphWrapper.is_debugging_mode |
1 | 0 | 0 |
attr |
CUDAGraphWrapper.graph_pool |
1 | 0 | 0 |
attr |
CUDAGraphWrapper.cudagraph_options |
1 | 0 | 0 |
attr |
CUDAGraphLogging.cg_mode |
1 | 0 | 0 |
attr |
CUDAGraphLogging.cg_capture_sizes |
1 | 0 | 0 |
attr |
CUDAGraphLogging.settings_header |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.compilation.decorators (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.compilation.monitor (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.compilation.partition_rules (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.compilation.passes.fusion.act_quant_fusion (9 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
silu_and_mul_nvfp4_quant_supported |
1 | 0 | 0 |
meth |
ActivationQuantPattern.empty_quant |
3 | 3 | 2 |
attr |
ActivationQuantPattern.quant_key |
1 | 0 | 0 |
attr |
ActivationQuantPattern.quant_dtype |
1 | 0 | 0 |
attr |
ActivationQuantPattern.QUANT_OP |
1 | 0 | 0 |
attr |
ActivationQuantPattern.FUSED_OP |
1 | 0 | 0 |
attr |
ActivationQuantPattern.silu_and_mul_matcher |
1 | 0 | 0 |
attr |
FP8_DTYPE |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
SiluMulFp8StaticQuantPattern.quant_matcher |
1 | 0 | 0 |
vllm.compilation.passes.fusion.allreduce_rms_fusion (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
attr |
AllReduceFusedAddRMSNormStaticQuantFP8Pattern.epsilon |
1 | 0 | 0 |
attr |
AllReduceFusedAddRMSNormStaticQuantFP8Pattern.allreduce_params |
1 | 0 | 0 |
attr |
AllReduceFusedAddRMSNormStaticQuantFP8Pattern.quant_dtype |
1 | 0 | 0 |
attr |
AllReduceFusedAddRMSNormStaticQuantFP8Pattern.rmsnorm_matcher |
1 | 0 | 0 |
attr |
AllReduceFusedAddRMSNormStaticQuantFP8Pattern.quant_matcher |
1 | 0 | 0 |
attr |
AllReduceFusionPass.disabled |
1 | 0 | 0 |
attr |
AllReduceFusionPass.tp_size |
1 | 0 | 0 |
attr |
AllReduceFusionPass.hidden_dim |
1 | 0 | 0 |
attr |
AllReduceFusionPass.group |
1 | 0 | 0 |
attr |
AllReduceFusionPass.max_token_num |
1 | 0 | 0 |
attr |
AllReduceFusionPass.allreduce_params |
1 | 0 | 0 |
attr |
FP8_DTYPE |
1 | 0 | 0 |
attr |
AllReduceFusedAddRMSNormStaticQuantNVFP4Pattern.epsilon |
1 | 0 | 0 |
attr |
AllReduceFusedAddRMSNormStaticQuantNVFP4Pattern.allreduce_params |
1 | 0 | 0 |
attr |
AllReduceFusedAddRMSNormStaticQuantNVFP4Pattern.rmsnorm_matcher |
1 | 0 | 0 |
attr |
AllReduceRMSNormPattern.epsilon |
1 | 0 | 0 |
attr |
AllReduceRMSNormPattern.allreduce_params |
1 | 0 | 0 |
attr |
AllReduceRMSNormPattern.rmsnorm_matcher |
1 | 0 | 0 |
attr |
AllReduceFusedRMSNormStaticQuantFP8Pattern.epsilon |
1 | 0 | 0 |
attr |
AllReduceFusedRMSNormStaticQuantFP8Pattern.allreduce_params |
1 | 0 | 0 |
attr |
AllReduceFusedRMSNormStaticQuantFP8Pattern.quant_dtype |
1 | 0 | 0 |
attr |
AllReduceFusedRMSNormStaticQuantFP8Pattern.rmsnorm_matcher |
1 | 0 | 0 |
attr |
AllReduceFusedRMSNormStaticQuantFP8Pattern.quant_matcher |
1 | 0 | 0 |
attr |
BasePattern.dtype |
1 | 0 | 0 |
attr |
BasePattern.device |
1 | 0 | 0 |
attr |
BasePattern.tp |
1 | 0 | 0 |
attr |
BasePattern.tp_size |
1 | 0 | 0 |
attr |
AllReduceFusedRMSNormStaticQuantNVFP4Pattern.epsilon |
1 | 0 | 0 |
attr |
AllReduceFusedRMSNormStaticQuantNVFP4Pattern.allreduce_params |
1 | 0 | 0 |
attr |
AllReduceFusedRMSNormStaticQuantNVFP4Pattern.rmsnorm_matcher |
1 | 0 | 0 |
attr |
FlashInferFusedAllReduceParams.world_size |
1 | 0 | 0 |
attr |
FlashInferFusedAllReduceParams.launch_with_pdl |
1 | 0 | 0 |
attr |
FlashInferFusedAllReduceParams.fp32_acc |
1 | 0 | 0 |
attr |
FlashInferFusedAllReduceParams.max_token_num |
1 | 0 | 0 |
attr |
AllReduceFusedAddRMSNormPattern.epsilon |
1 | 0 | 0 |
attr |
AllReduceFusedAddRMSNormPattern.allreduce_params |
1 | 0 | 0 |
attr |
AllReduceFusedAddRMSNormPattern.rmsnorm_matcher |
1 | 0 | 0 |
vllm.compilation.passes.fusion.attn_quant_fusion (12 missing, 4 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
AttentionFp8StaticQuantPattern.quant_matcher |
1 | 0 | 0 |
meth |
AttentionQuantPattern.empty |
3 | 3 | 2 |
meth |
AttentionQuantPattern.empty_quant |
3 | 3 | 2 |
attr |
AttentionQuantPattern.layer |
1 | 0 | 0 |
attr |
AttentionQuantPattern.layer_name |
1 | 0 | 0 |
attr |
AttentionQuantPattern.num_heads |
1 | 0 | 0 |
attr |
AttentionQuantPattern.head_size |
1 | 0 | 0 |
attr |
AttentionQuantPattern.quant_key |
1 | 0 | 0 |
attr |
AttentionQuantPattern.quant_dtype |
1 | 0 | 0 |
attr |
AttentionQuantPattern.dtype |
1 | 0 | 0 |
attr |
AttentionQuantPattern.QUANT_OP |
1 | 0 | 0 |
attr |
AttnFusionPass.patterns |
1 | 0 | 0 |
attr |
FP8_DTYPE |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.compilation.passes.fusion.collective_fusion (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
FP8_DTYPE |
1 | 0 | 0 |
attr |
BasePattern.dtype |
1 | 0 | 0 |
attr |
BasePattern.device |
1 | 0 | 0 |
attr |
BasePattern.tp |
1 | 0 | 0 |
attr |
BasePattern.tp_size |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.compilation.passes.fusion.matcher_utils (23 missing, 15 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MatcherFusedAddRMSNorm.epsilon |
1 | 0 | 0 |
attr |
MatcherFusedAddRMSNorm.match_rocm_aiter |
1 | 0 | 0 |
attr |
MatcherRotaryEmbedding.is_neox |
1 | 0 | 0 |
attr |
MatcherRotaryEmbedding.head_size |
1 | 0 | 0 |
attr |
MatcherRotaryEmbedding.num_heads |
1 | 0 | 0 |
attr |
MatcherRotaryEmbedding.num_kv_heads |
1 | 0 | 0 |
attr |
MatcherRotaryEmbedding.q_size |
1 | 0 | 0 |
attr |
MatcherRotaryEmbedding.kv_size |
1 | 0 | 0 |
attr |
MatcherRotaryEmbedding.rotary_dim |
1 | 0 | 0 |
attr |
MatcherRotaryEmbedding.rotary_op |
1 | 0 | 0 |
attr |
MatcherQuantFP8.quant_key |
1 | 0 | 0 |
attr |
MatcherQuantFP8.has_col_major_scales |
1 | 0 | 0 |
attr |
MatcherQuantFP8.is_e8m0 |
1 | 0 | 0 |
attr |
MatcherQuantFP8.match_rocm_aiter |
1 | 0 | 0 |
attr |
MatcherQuantFP8.is_tma_aligned |
1 | 0 | 0 |
attr |
MatcherQuantFP8.quant_fp8 |
1 | 0 | 0 |
attr |
MatcherQuantFP8.QUANT_OP |
1 | 0 | 0 |
attr |
MatcherRMSNorm.epsilon |
1 | 0 | 0 |
attr |
MatcherRMSNorm.match_rocm_aiter |
1 | 0 | 0 |
meth |
MatcherCustomOp.forward_custom |
3 | 3 | 3 |
meth |
MatcherCustomOp.forward_native |
3 | 3 | 3 |
meth |
MatcherCustomOp.call |
3 | 3 | 3 |
meth |
MatcherCustomOp.empty |
3 | 3 | 2 |
meth |
MatcherCustomOp.empty_int64 |
3 | 3 | 2 |
meth |
MatcherCustomOp.empty_f32 |
3 | 3 | 2 |
attr |
MatcherCustomOp.model_dtype |
1 | 0 | 0 |
attr |
MatcherCustomOp.device |
1 | 0 | 0 |
attr |
MatcherCustomOp.enabled |
1 | 0 | 0 |
attr |
MatcherCustomOp.forward |
1 | 0 | 0 |
vllm.compilation.passes.fusion.qk_norm_rope_fusion (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
QkNormRopePattern.num_heads |
1 | 0 | 0 |
attr |
QkNormRopePattern.num_kv_heads |
1 | 0 | 0 |
attr |
QkNormRopePattern.head_dim |
1 | 0 | 0 |
attr |
QkNormRopePattern.q_size |
1 | 0 | 0 |
attr |
QkNormRopePattern.kv_size |
1 | 0 | 0 |
attr |
QkNormRopePattern.eps |
1 | 0 | 0 |
attr |
QkNormRopePattern.rmsnorm_matcher |
1 | 0 | 0 |
attr |
QkNormRopePattern.is_neox |
1 | 0 | 0 |
attr |
QkNormRopePattern.rope_flashinfer |
1 | 0 | 0 |
attr |
QkNormRopePattern.rope_matcher |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.compilation.passes.fusion.rms_quant_fusion (15 missing, 8 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
empty_fp32 |
3 | 3 | 2 |
attr |
RMSNormGroupQuantPattern.group_shape |
1 | 0 | 0 |
attr |
RMSNormGroupQuantPattern.has_col_major_scales |
1 | 0 | 0 |
attr |
RMSNormGroupQuantPattern.is_tma_aligned |
1 | 0 | 0 |
attr |
FP8_DTYPE |
1 | 0 | 0 |
func |
empty_i32 |
3 | 3 | 2 |
attr |
RMSNormQuantPattern.epsilon |
1 | 0 | 0 |
attr |
RMSNormQuantPattern.quant_dtype |
1 | 0 | 0 |
attr |
RMSNormQuantPattern.model_dtype |
1 | 0 | 0 |
attr |
RMSNormQuantPattern.FUSED_OP |
1 | 0 | 0 |
attr |
RMSNormQuantPattern.rmsnorm_matcher |
1 | 0 | 0 |
attr |
RMSNormQuantPattern.quant_matcher |
1 | 0 | 0 |
func |
empty_bf16 |
3 | 3 | 2 |
func |
empty_i64 |
3 | 3 | 2 |
attr |
FusedAddRMSNormGroupQuantPattern.group_shape |
1 | 0 | 0 |
attr |
FusedAddRMSNormGroupQuantPattern.is_e8m0 |
1 | 0 | 0 |
attr |
FusedAddRMSNormGroupQuantPattern.has_col_major_scales |
1 | 0 | 0 |
attr |
FusedAddRMSNormGroupQuantPattern.is_tma_aligned |
1 | 0 | 0 |
vllm.compilation.passes.fusion.rocm_aiter_fusion (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
AiterRMSNormDynamicQuantPattern.FUSED_OP |
1 | 0 | 0 |
attr |
AiterRMSFp8GroupQuantPattern.FUSED_OP |
1 | 0 | 0 |
meth |
AddAiterRMSNormPadPattern.init |
4 | 3 | 0 |
attr |
AddAiterRMSNormPadPattern.AITER_TRITON_ADD_RMSNORM_PAD_OP |
1 | 0 | 0 |
attr |
AddAiterRMSNormPadPattern.epsilon |
1 | 0 | 0 |
attr |
AddAiterRMSNormPadPattern.hidden_size |
1 | 0 | 0 |
attr |
AddAiterRMSNormPadPattern.x_pad_to_multiple |
1 | 0 | 0 |
attr |
AddAiterRMSNormPadPattern.rmsnorm_matcher |
1 | 0 | 0 |
attr |
AiterSiluMulFp8GroupQuantPattern.FUSED_SILU_MUL_QUANT_OP |
1 | 0 | 0 |
attr |
AiterSiluMulFp8GroupQuantPattern.silu_and_mul_matcher |
1 | 0 | 0 |
attr |
AiterSiluMulFp8GroupQuantPattern.quant_matcher |
1 | 0 | 0 |
attr |
FP8_DTYPE |
1 | 0 | 0 |
attr |
AiterFusedAddRMSFp8GroupQuantPattern.FUSED_OP |
1 | 0 | 0 |
attr |
AiterFusedAddRMSNormDynamicQuantPattern.FUSED_OP |
1 | 0 | 0 |
meth |
AiterRMSNormQuantPattern.init |
4 | 3 | 0 |
attr |
AiterRMSNormQuantPattern.epsilon |
1 | 0 | 0 |
attr |
AiterRMSNormQuantPattern.quant_dtype |
1 | 0 | 0 |
attr |
AiterRMSNormQuantPattern.rmsnorm_matcher |
1 | 0 | 0 |
attr |
AiterRMSNormQuantPattern.quant_matcher |
1 | 0 | 0 |
meth |
RocmAiterTritonAddRMSNormPadFusionPass.init |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.compilation.passes.fusion.rope_kvcache_fusion (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
RopeReshapeKVCachePattern.layer_name |
1 | 0 | 0 |
attr |
RopeReshapeKVCachePattern.num_heads |
1 | 0 | 0 |
attr |
RopeReshapeKVCachePattern.num_kv_heads |
1 | 0 | 0 |
attr |
RopeReshapeKVCachePattern.head_size |
1 | 0 | 0 |
attr |
RopeReshapeKVCachePattern.head_size_v |
1 | 0 | 0 |
attr |
RopeReshapeKVCachePattern.is_neox |
1 | 0 | 0 |
attr |
RopeReshapeKVCachePattern.q_size |
1 | 0 | 0 |
attr |
RopeReshapeKVCachePattern.k_size |
1 | 0 | 0 |
attr |
RopeReshapeKVCachePattern.v_size |
1 | 0 | 0 |
attr |
RopeReshapeKVCachePattern.rope_matcher |
1 | 0 | 0 |
attr |
RopeKVCacheFusionPass.max_token_num |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.compilation.passes.fusion.sequence_parallelism (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MiddleAllReduceRMSNormPattern.rmsnorm_matcher |
1 | 0 | 0 |
attr |
FirstAllReduceRMSNormPattern.rmsnorm_matcher |
1 | 0 | 0 |
attr |
SequenceParallelismPass.min_token_num |
1 | 0 | 0 |
attr |
SequenceParallelismPass.noop_cleanup |
1 | 0 | 0 |
attr |
MiddleAllReduceRMSNormStaticFP8Pattern.rmsnorm_matcher |
1 | 0 | 0 |
attr |
MiddleAllReduceRMSNormStaticFP8Pattern.quant_matcher |
1 | 0 | 0 |
attr |
FirstAllReduceRMSNormStaticFP8Pattern.rmsnorm_matcher |
1 | 0 | 0 |
attr |
FirstAllReduceRMSNormStaticFP8Pattern.quant_matcher |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.compilation.passes.inductor_pass (2 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CallableInductorPass.uuid |
1 | 1 | 1 |
attr |
CallableInductorPass.callable |
1 | 0 | 0 |
meth |
PassContext.init |
2 | 1 | 0 |
vllm.compilation.passes.pass_manager (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.compilation.passes.utility.fix_functionalization (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.compilation.passes.utility.noop_elimination (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.compilation.passes.utility.scatter_split_replace (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.compilation.passes.utility.split_coalescing (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.compilation.passes.vllm_inductor_pass (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
PrinterInductorPass.name |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
VllmInductorPass.init |
2 | 1 | 0 |
attr |
VllmInductorPass.compilation_config |
1 | 0 | 0 |
attr |
VllmInductorPass.pass_config |
1 | 0 | 0 |
attr |
VllmInductorPass.model_dtype |
1 | 0 | 0 |
attr |
VllmInductorPass.pass_name |
1 | 0 | 0 |
vllm.compilation.piecewise_backend (20 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PiecewiseBackend.init |
10 | 9 | 0 |
meth |
PiecewiseBackend._log_compile_start |
2 | 1 | 0 |
meth |
PiecewiseBackend._maybe_compile_for_range_entry |
3 | 3 | 1 |
meth |
PiecewiseBackend.call |
2 | 2 | 2 |
attr |
PiecewiseBackend.graph |
1 | 0 | 0 |
attr |
PiecewiseBackend.vllm_config |
1 | 0 | 0 |
attr |
PiecewiseBackend.compilation_config |
1 | 0 | 0 |
attr |
PiecewiseBackend.piecewise_compile_index |
1 | 0 | 0 |
attr |
PiecewiseBackend.total_piecewise_compiles |
1 | 0 | 0 |
attr |
PiecewiseBackend.vllm_backend |
1 | 0 | 0 |
attr |
PiecewiseBackend.compiled_runnables |
1 | 0 | 0 |
attr |
PiecewiseBackend.submod_name |
1 | 0 | 0 |
attr |
PiecewiseBackend.is_first_graph |
1 | 0 | 0 |
attr |
PiecewiseBackend.is_last_graph |
1 | 0 | 0 |
attr |
PiecewiseBackend.is_full_graph |
1 | 0 | 0 |
attr |
PiecewiseBackend.is_encoder_compilation |
1 | 0 | 0 |
attr |
PiecewiseBackend.compile_ranges |
1 | 0 | 0 |
attr |
PiecewiseBackend.compile_sizes |
1 | 0 | 0 |
attr |
PiecewiseBackend.sym_shape_indices |
1 | 0 | 0 |
attr |
PiecewiseBackend.returns_tuple |
1 | 0 | 0 |
attr |
PiecewiseBackend.on_compilation_complete |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.compilation.wrapper (6 missing, 13 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TorchCompileWithNoGuardsWrapper.check_invariants_and_forward |
3 | 3 | 3 |
meth |
TorchCompileWithNoGuardsWrapper._call_with_optional_nvtx_range |
4 | 4 | 1 |
meth |
TorchCompileWithNoGuardsWrapper.aot_compile |
3 | 3 | 3 |
meth |
TorchCompileWithNoGuardsWrapper.call |
3 | 3 | 3 |
meth |
TorchCompileWithNoGuardsWrapper.forward |
3 | 3 | 3 |
attr |
TorchCompileWithNoGuardsWrapper.compiled |
1 | 0 | 0 |
attr |
TorchCompileWithNoGuardsWrapper.vllm_config |
1 | 0 | 0 |
attr |
TorchCompileWithNoGuardsWrapper.layerwise_nvtx_tracing_enabled |
1 | 0 | 0 |
attr |
TorchCompileWithNoGuardsWrapper.first_compile |
1 | 0 | 0 |
attr |
TorchCompileWithNoGuardsWrapper.evaluate_guards |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.config.attention (0 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AttentionConfig.validate_backend_before |
2 | 2 | 2 |
vllm.config.cache (2 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CacheConfig.metrics_info |
1 | 0 | 0 |
meth |
CacheConfig.verify_with_parallel_config |
2 | 2 | 1 |
attr |
logger |
1 | 0 | 0 |
vllm.config.compilation (5 missing, 10 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompilationConfig.validate_mode_before |
2 | 2 | 2 |
meth |
CompilationConfig.validate_cudagraph_mode_before |
2 | 2 | 2 |
meth |
CompilationConfig.validate_pass_config_before |
2 | 2 | 2 |
meth |
CompilationConfig._skip_none_validation |
3 | 3 | 2 |
meth |
CompilationConfig.set_splitting_ops_for_v1 |
3 | 2 | 0 |
meth |
CompilationConfig.set_splitting_ops_for_attn_fusion |
1 | 0 | 0 |
meth |
CompilationConfig.custom_op_log_check |
1 | 0 | 0 |
meth |
CompilationConfig.adjust_cudagraph_sizes_for_spec_decode |
3 | 2 | 0 |
meth |
PassConfig._skip_none_validation |
3 | 3 | 2 |
attr |
logger |
1 | 0 | 0 |
vllm.config.device (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeviceConfig.post_init |
1 | 0 | 0 |
vllm.config.ec_transfer (2 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ECTransferConfig.get_from_extra_config |
3 | 1 | 1 |
vllm.config.kernel (0 missing, 4 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KernelConfig._normalize_moe_backend |
2 | 2 | 2 |
meth |
KernelConfig._skip_none_validation |
3 | 3 | 2 |
vllm.config.kv_events (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KVEventsConfig.post_init |
1 | 0 | 0 |
vllm.config.kv_transfer (2 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KVTransferConfig.get_from_extra_config |
3 | 1 | 1 |
vllm.config.load (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.config.lora (2 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LoRAConfig.verify_with_model_config |
2 | 1 | 1 |
attr |
logger |
1 | 0 | 0 |
vllm.config.model (20 missing, 14 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ModelConfig._apply_dict_overrides |
3 | 3 | 1 |
meth |
ModelConfig._skip_none_validation |
3 | 3 | 2 |
meth |
ModelConfig.validate_quantization_before |
2 | 2 | 2 |
meth |
ModelConfig._try_verify_and_update_model_config |
1 | 0 | 0 |
meth |
ModelConfig.verify_dual_chunk_attention_config |
2 | 2 | 1 |
meth |
ModelConfig.verify_with_parallel_config |
2 | 2 | 1 |
meth |
ModelConfig.get_num_kv_heads |
2 | 2 | 1 |
meth |
ModelConfig.get_num_attention_heads |
2 | 2 | 1 |
meth |
ModelConfig.get_layers_start_end_indices |
2 | 2 | 1 |
meth |
ModelConfig.get_num_layers |
2 | 2 | 1 |
meth |
ModelConfig.get_num_layers_by_block_type |
3 | 3 | 1 |
meth |
ModelConfig.get_and_verify_max_len |
2 | 1 | 0 |
prop |
ModelConfig.registry |
1 | 0 | 0 |
prop |
ModelConfig.has_inner_state |
1 | 0 | 0 |
prop |
ModelConfig.matryoshka_dimensions |
1 | 0 | 0 |
prop |
ModelConfig.embedding_size |
1 | 0 | 0 |
attr |
ModelConfig.hf_config |
1 | 1 | 1 |
attr |
ModelConfig.hf_text_config |
1 | 1 | 1 |
attr |
ModelConfig.attention_chunk_size |
1 | 0 | 0 |
attr |
ModelConfig.encoder_config |
1 | 0 | 0 |
attr |
ModelConfig.hf_image_processor_config |
1 | 0 | 0 |
attr |
ModelConfig.model_arch_config |
1 | 0 | 0 |
attr |
ModelConfig.runner_type |
1 | 0 | 0 |
attr |
ModelConfig.convert_type |
1 | 0 | 0 |
attr |
ModelConfig.original_max_model_len |
1 | 0 | 0 |
attr |
ModelConfig.config_updated |
1 | 0 | 0 |
func |
iter_architecture_defaults |
1 | 0 | 0 |
func |
str_dtype_to_torch_dtype |
2 | 1 | 0 |
attr |
me_models |
1 | 0 | 0 |
func |
get_served_model_name |
3 | 2 | 0 |
attr |
me_quant |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.config.model_arch (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.config.multimodal (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MultiModalConfig._validate_multimodal_config |
1 | 0 | 0 |
meth |
MultiModalConfig.is_multimodal_pruning_enabled |
1 | 0 | 0 |
vllm.config.observability (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ObservabilityConfig._validate_tracing_config |
1 | 0 | 0 |
vllm.config.parallel (2 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ParallelConfig._skip_none_validation |
3 | 3 | 2 |
meth |
ParallelConfig.compute_hash |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.config.pooler (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.config.profiler (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.config.scheduler (3 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SchedulerConfig.default_factory |
2 | 0 | 0 |
meth |
SchedulerConfig._skip_none_validation |
3 | 3 | 2 |
attr |
logger |
1 | 0 | 0 |
vllm.config.speculative (6 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SpeculativeConfig.hf_config_override |
2 | 2 | 2 |
meth |
SpeculativeConfig.post_init |
1 | 0 | 0 |
meth |
SpeculativeConfig.validate_suffix_decoding |
1 | 0 | 0 |
meth |
SpeculativeConfig._verify_and_get_draft_tp |
4 | 4 | 1 |
meth |
SpeculativeConfig.update_arch |
1 | 0 | 0 |
meth |
SpeculativeConfig.verify_equal_vocab_size_if_draft_model |
1 | 0 | 0 |
attr |
me_quant |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.config.utils (4 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
config |
4 | 4 | 1 |
func |
replace |
3 | 2 | 0 |
func |
normalize_value |
2 | 0 | 0 |
func |
get_field |
3 | 3 | 1 |
func |
getattr_iter |
6 | 6 | 1 |
attr |
logger |
1 | 0 | 0 |
vllm.config.vllm (10 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_cached_compilation_config |
1 | 0 | 0 |
meth |
VllmConfig.with_hf_config |
3 | 3 | 1 |
meth |
VllmConfig._set_config_default |
4 | 4 | 2 |
meth |
VllmConfig.post_init |
1 | 0 | 0 |
meth |
VllmConfig._set_max_num_scheduled_tokens |
1 | 0 | 0 |
meth |
VllmConfig._set_cudagraph_sizes |
1 | 0 | 0 |
meth |
VllmConfig._set_compile_ranges |
1 | 0 | 0 |
meth |
VllmConfig.try_verify_and_update_config |
1 | 0 | 0 |
meth |
VllmConfig.str |
1 | 0 | 0 |
func |
set_current_vllm_config |
4 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.connections (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
global_http_connection |
1 | 0 | 0 |
meth |
HTTPConnection._validate_http_url |
2 | 1 | 0 |
meth |
HTTPConnection.get_response |
6 | 5 | 0 |
meth |
HTTPConnection.get_async_response |
5 | 4 | 0 |
attr |
HTTPConnection.reuse_client |
1 | 0 | 0 |
vllm.device_allocator.cumem (6 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
libcudart |
1 | 1 | 1 |
attr |
lib_name |
1 | 0 | 0 |
meth |
CuMemAllocator.init |
1 | 0 | 0 |
meth |
CuMemAllocator.use_memory_pool |
2 | 1 | 0 |
attr |
CuMemAllocator.python_malloc_callback |
1 | 0 | 0 |
attr |
CuMemAllocator.python_free_callback |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.communication_op (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
broadcast_tensor_dict |
3 | 2 | 0 |
vllm.distributed.device_communicators.all2all (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepEPHTAll2AllManager.init |
3 | 0 | 0 |
meth |
DeepEPHTAll2AllManager.get_handle |
2 | 0 | 0 |
meth |
DeepEPHTAll2AllManager.set_num_sms |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
DeepEPAll2AllManagerBase.init |
3 | 0 | 0 |
meth |
DeepEPAll2AllManagerBase.get_handle |
2 | 0 | 0 |
meth |
DeepEPAll2AllManagerBase.destroy |
1 | 0 | 0 |
attr |
DeepEPAll2AllManagerBase.handle_cache |
1 | 0 | 0 |
attr |
DeepEPAll2AllManagerBase.num_sms |
1 | 0 | 0 |
meth |
AgRsAll2AllManager.init |
3 | 0 | 0 |
meth |
AgRsAll2AllManager.destroy |
1 | 0 | 0 |
meth |
MoriAll2AllManager.init |
2 | 0 | 0 |
meth |
MoriAll2AllManager._make_all2all_kwargs |
11 | 10 | 0 |
meth |
MoriAll2AllManager._make_handle |
2 | 0 | 0 |
meth |
MoriAll2AllManager.get_handle |
2 | 0 | 0 |
attr |
MoriAll2AllManager.handle_cache |
1 | 0 | 0 |
meth |
NaiveAll2AllManager.init |
3 | 0 | 0 |
meth |
NaiveAll2AllManager.destroy |
1 | 0 | 0 |
meth |
FlashInferAllToAllManager.init |
3 | 0 | 0 |
meth |
FlashInferAllToAllManager.initialize |
4 | 3 | 0 |
meth |
FlashInferAllToAllManager.ensure_alltoall_workspace_initialized |
1 | 0 | 0 |
meth |
FlashInferAllToAllManager.get_handle |
2 | 0 | 0 |
meth |
FlashInferAllToAllManager.cleanup |
1 | 0 | 0 |
attr |
FlashInferAllToAllManager.initialized |
1 | 0 | 0 |
attr |
FlashInferAllToAllManager.alltoall_info |
1 | 0 | 0 |
meth |
DeepEPLLAll2AllManager.init |
3 | 0 | 0 |
meth |
DeepEPLLAll2AllManager.get_handle |
2 | 0 | 0 |
vllm.distributed.device_communicators.base_device_communicator (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Cache.init |
1 | 0 | 0 |
meth |
Cache.get_or_create |
3 | 0 | 0 |
meth |
DeviceCommunicatorBase.init |
7 | 6 | 0 |
meth |
DeviceCommunicatorBase.destroy |
1 | 0 | 0 |
meth |
DeviceCommunicatorBase.batch_isend_irecv |
2 | 1 | 0 |
attr |
DeviceCommunicatorBase.device |
1 | 0 | 0 |
attr |
DeviceCommunicatorBase.cpu_group |
1 | 0 | 0 |
attr |
DeviceCommunicatorBase.device_group |
1 | 0 | 0 |
attr |
DeviceCommunicatorBase.unique_name |
1 | 0 | 0 |
attr |
DeviceCommunicatorBase.is_ep_communicator |
1 | 0 | 0 |
attr |
DeviceCommunicatorBase.use_all2all |
1 | 0 | 0 |
attr |
DeviceCommunicatorBase.all2all_backend |
1 | 0 | 0 |
attr |
DeviceCommunicatorBase.rank |
1 | 0 | 0 |
attr |
DeviceCommunicatorBase.world_size |
1 | 0 | 0 |
attr |
DeviceCommunicatorBase.ranks |
1 | 0 | 0 |
attr |
DeviceCommunicatorBase.global_rank |
1 | 0 | 0 |
attr |
DeviceCommunicatorBase.global_world_size |
1 | 0 | 0 |
attr |
DeviceCommunicatorBase.rank_in_group |
1 | 0 | 0 |
meth |
All2AllManagerBase.init |
3 | 0 | 0 |
meth |
All2AllManagerBase.get_handle |
2 | 0 | 0 |
meth |
All2AllManagerBase.set_num_sms |
2 | 1 | 0 |
meth |
All2AllManagerBase.combine |
3 | 2 | 0 |
meth |
All2AllManagerBase.destroy |
1 | 0 | 0 |
attr |
All2AllManagerBase.cpu_group |
1 | 0 | 0 |
attr |
All2AllManagerBase.tcp_store_group |
1 | 0 | 0 |
attr |
All2AllManagerBase.dp_group |
1 | 0 | 0 |
attr |
All2AllManagerBase.tp_group |
1 | 0 | 0 |
attr |
All2AllManagerBase.dp_rank |
1 | 0 | 0 |
attr |
All2AllManagerBase.dp_world_size |
1 | 0 | 0 |
attr |
All2AllManagerBase.internode |
1 | 0 | 0 |
vllm.distributed.device_communicators.cpu_communicator (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
CpuCommunicator.init |
5 | 4 | 0 |
meth |
CpuCommunicator.all_reduce |
2 | 0 | 0 |
attr |
CpuCommunicator.dist_module |
1 | 0 | 0 |
attr |
CpuCommunicator.all2all_backend |
1 | 0 | 0 |
attr |
CpuCommunicator.all2all_manager |
1 | 0 | 0 |
vllm.distributed.device_communicators.cuda_communicator (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CudaCommunicator.init |
8 | 7 | 0 |
meth |
CudaCommunicator.all_reduce |
2 | 0 | 0 |
meth |
CudaCommunicator.reduce_scatter |
3 | 2 | 0 |
meth |
CudaCommunicator.reduce_scatterv |
4 | 3 | 0 |
meth |
CudaCommunicator.destroy |
1 | 0 | 0 |
meth |
CudaCommunicator.all_gatherv |
4 | 3 | 0 |
meth |
CudaCommunicator.batch_isend_irecv |
2 | 1 | 0 |
attr |
CudaCommunicator.use_custom_allreduce |
1 | 0 | 0 |
attr |
CudaCommunicator.use_torch_symm_mem |
1 | 0 | 0 |
attr |
CudaCommunicator.use_flashinfer_allreduce |
1 | 0 | 0 |
attr |
CudaCommunicator.all2all_manager |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.device_communicators.cuda_wrapper (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CudaRTLibrary.init |
2 | 1 | 0 |
attr |
CudaRTLibrary.exported_functions |
1 | 0 | 0 |
attr |
CudaRTLibrary.lib |
1 | 0 | 0 |
attr |
CudaRTLibrary.funcs |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.device_communicators.custom_all_reduce (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
is_weak_contiguous |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
CustomAllreduce.init |
5 | 3 | 0 |
meth |
CustomAllreduce.capture |
1 | 0 | 0 |
meth |
CustomAllreduce.register_graph_buffers |
1 | 0 | 0 |
meth |
CustomAllreduce.should_custom_ar |
2 | 1 | 0 |
meth |
CustomAllreduce.all_reduce |
4 | 3 | 0 |
meth |
CustomAllreduce.close |
1 | 0 | 0 |
meth |
CustomAllreduce.del |
1 | 0 | 0 |
attr |
CustomAllreduce.disabled |
1 | 0 | 0 |
attr |
CustomAllreduce.group |
1 | 0 | 0 |
attr |
CustomAllreduce.rank |
1 | 0 | 0 |
attr |
CustomAllreduce.device |
1 | 0 | 0 |
attr |
CustomAllreduce.meta_ptrs |
1 | 0 | 0 |
attr |
CustomAllreduce.buffer_ptrs |
1 | 0 | 0 |
attr |
CustomAllreduce.rank_data |
1 | 0 | 0 |
attr |
CustomAllreduce.max_size |
1 | 0 | 0 |
attr |
CustomAllreduce.world_size |
1 | 0 | 0 |
attr |
CustomAllreduce.fully_connected |
1 | 0 | 0 |
vllm.distributed.device_communicators.flashinfer_all_reduce (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_fi_ar_workspace |
1 | 0 | 0 |
func |
get_fi_ar_quant_workspace |
1 | 0 | 0 |
func |
destroy_fi_ar_workspace |
1 | 0 | 0 |
meth |
FlashInferAllReduce.init |
3 | 2 | 0 |
meth |
FlashInferAllReduce.destroy |
1 | 0 | 0 |
attr |
FlashInferAllReduce.disabled |
1 | 0 | 0 |
attr |
FlashInferAllReduce.group |
1 | 0 | 0 |
attr |
FlashInferAllReduce.world_size |
1 | 0 | 0 |
attr |
FlashInferAllReduce.rank |
1 | 0 | 0 |
attr |
FlashInferAllReduce.device |
1 | 0 | 0 |
attr |
FlashInferAllReduce.max_workspace_size |
1 | 0 | 0 |
attr |
FlashInferAllReduce.max_num_tokens |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.device_communicators.mnnvl_compat (3 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CustomCommunicator.init |
2 | 0 | 0 |
meth |
CustomCommunicator.allgather |
2 | 1 | 0 |
meth |
CustomCommunicator.bcast |
3 | 3 | 2 |
vllm.distributed.device_communicators.pynccl (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
register_nccl_symmetric_ops |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
PyNcclCommunicator.init |
4 | 3 | 0 |
meth |
PyNcclCommunicator.all_reduce |
5 | 4 | 0 |
meth |
PyNcclCommunicator.all_gather |
4 | 2 | 0 |
meth |
PyNcclCommunicator.all_gatherv |
5 | 3 | 0 |
meth |
PyNcclCommunicator.reduce_scatter |
5 | 3 | 0 |
meth |
PyNcclCommunicator.reduce_scatterv |
6 | 4 | 0 |
meth |
PyNcclCommunicator.send |
4 | 2 | 0 |
meth |
PyNcclCommunicator.recv |
4 | 2 | 0 |
meth |
PyNcclCommunicator.broadcast |
4 | 2 | 0 |
meth |
PyNcclCommunicator.group_start |
1 | 0 | 0 |
meth |
PyNcclCommunicator.group_end |
1 | 0 | 0 |
meth |
PyNcclCommunicator.register_comm_window |
2 | 1 | 0 |
meth |
PyNcclCommunicator.register_comm_window_raw |
3 | 2 | 0 |
meth |
PyNcclCommunicator.deregister_comm_window |
2 | 0 | 0 |
meth |
PyNcclCommunicator.batch_isend_irecv |
3 | 1 | 0 |
attr |
PyNcclCommunicator.group |
1 | 0 | 0 |
attr |
PyNcclCommunicator.available |
1 | 0 | 0 |
attr |
PyNcclCommunicator.disabled |
1 | 0 | 0 |
attr |
PyNcclCommunicator.nccl_version |
1 | 0 | 0 |
attr |
PyNcclCommunicator.device |
1 | 0 | 0 |
attr |
PyNcclCommunicator.rank |
1 | 0 | 0 |
attr |
PyNcclCommunicator.world_size |
1 | 0 | 0 |
attr |
PyNcclCommunicator.nccl |
1 | 0 | 0 |
attr |
PyNcclCommunicator.unique_id |
1 | 0 | 0 |
vllm.distributed.device_communicators.pynccl_allocator (14 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
is_symmetric_memory_enabled |
1 | 0 | 0 |
func |
is_symmetric_memory_tensor |
2 | 1 | 0 |
func |
compile_nccl_allocator |
1 | 0 | 0 |
meth |
nccl_symm_mem_context.init |
3 | 2 | 0 |
meth |
nccl_symm_mem_context.enter |
1 | 0 | 0 |
meth |
nccl_symm_mem_context.exit |
4 | 0 | 0 |
attr |
nccl_symm_mem_context.disabled |
1 | 0 | 0 |
attr |
nccl_symm_mem_context.is_graph_capture |
1 | 0 | 0 |
attr |
nccl_symm_mem_context.device |
1 | 0 | 0 |
func |
get_nccl_mem_pool |
1 | 0 | 0 |
func |
set_graph_pool_id |
2 | 2 | 1 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.device_communicators.pynccl_wrapper (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NCCLLibrary.init |
2 | 1 | 0 |
attr |
NCCLLibrary.exported_functions |
1 | 0 | 0 |
attr |
NCCLLibrary.lib |
1 | 0 | 0 |
vllm.distributed.device_communicators.quick_all_reduce (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QuickAllReduce.init_quick_all_reduce |
1 | 0 | 0 |
meth |
QuickAllReduce._rocm_arch_available |
1 | 0 | 0 |
meth |
QuickAllReduce.create_shared_buffer |
1 | 0 | 0 |
meth |
QuickAllReduce.should_quick_allreduce |
2 | 1 | 0 |
meth |
QuickAllReduce.quick_all_reduce |
3 | 2 | 0 |
meth |
QuickAllReduce.close |
1 | 0 | 0 |
meth |
QuickAllReduce.del |
1 | 0 | 0 |
attr |
QuickAllReduce.disabled |
1 | 0 | 0 |
attr |
QuickAllReduce.group |
1 | 0 | 0 |
attr |
QuickAllReduce.rank |
1 | 0 | 0 |
attr |
QuickAllReduce.world_size |
1 | 0 | 0 |
attr |
QuickAllReduce.device |
1 | 0 | 0 |
attr |
QuickAllReduce.fully_connected |
1 | 0 | 0 |
func |
is_weak_contiguous |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.device_communicators.ray_communicator (8 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RayPPCommunicator.init |
7 | 6 | 1 |
meth |
RayPPCommunicator._build_actor_rank_mapping |
1 | 0 | 0 |
meth |
RayPPCommunicator.allgather |
3 | 2 | 0 |
meth |
RayPPCommunicator.allreduce |
4 | 3 | 0 |
meth |
RayPPCommunicator.reducescatter |
4 | 3 | 0 |
meth |
RayPPCommunicator.generate_communicator_id |
1 | 1 | 1 |
prop |
RayPPCommunicator.recv_stream |
1 | 0 | 0 |
prop |
RayPPCommunicator.send_stream |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.device_communicators.shm_broadcast (51 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
memory_fence |
1 | 0 | 0 |
meth |
SpinSleepTimer.init |
3 | 2 | 0 |
meth |
SpinSleepTimer.record_activity |
1 | 0 | 0 |
meth |
SpinSleepTimer.spin |
1 | 0 | 0 |
attr |
SpinSleepTimer.last_activity |
1 | 0 | 0 |
attr |
SpinSleepTimer.busy_loop_s |
1 | 0 | 0 |
attr |
SpinSleepTimer.wait_sleep_s |
1 | 0 | 0 |
meth |
ShmRingBuffer.init |
5 | 4 | 0 |
meth |
ShmRingBuffer.handle |
1 | 0 | 0 |
meth |
ShmRingBuffer.reduce |
1 | 0 | 0 |
meth |
ShmRingBuffer.del |
1 | 0 | 0 |
meth |
ShmRingBuffer.get_data |
2 | 1 | 0 |
meth |
ShmRingBuffer.get_metadata |
2 | 1 | 0 |
attr |
ShmRingBuffer.n_reader |
1 | 0 | 0 |
attr |
ShmRingBuffer.metadata_size |
1 | 0 | 0 |
attr |
ShmRingBuffer.max_chunk_bytes |
1 | 0 | 0 |
attr |
ShmRingBuffer.max_chunks |
1 | 0 | 0 |
attr |
ShmRingBuffer.total_bytes_of_buffer |
1 | 0 | 0 |
attr |
ShmRingBuffer.data_offset |
1 | 0 | 0 |
attr |
ShmRingBuffer.metadata_offset |
1 | 0 | 0 |
attr |
ShmRingBuffer.is_creator |
1 | 0 | 0 |
attr |
ShmRingBuffer.shared_memory |
1 | 0 | 0 |
meth |
MessageQueue.init |
7 | 4 | 0 |
meth |
MessageQueue.create_from_handle |
3 | 2 | 0 |
meth |
MessageQueue.wait_until_ready |
1 | 0 | 0 |
meth |
MessageQueue.acquire_write |
2 | 1 | 0 |
meth |
MessageQueue.acquire_read |
4 | 3 | 0 |
meth |
MessageQueue.enqueue |
3 | 1 | 0 |
meth |
MessageQueue.dequeue |
4 | 3 | 0 |
meth |
MessageQueue.recv |
3 | 3 | 1 |
meth |
MessageQueue.broadcast_object |
2 | 0 | 0 |
meth |
MessageQueue.create_from_process_group_single_reader |
6 | 4 | 0 |
meth |
MessageQueue.create_from_process_group |
7 | 4 | 0 |
attr |
MessageQueue.n_local_reader |
1 | 0 | 0 |
attr |
MessageQueue.n_remote_reader |
1 | 0 | 0 |
attr |
MessageQueue.local_reader_rank |
1 | 0 | 0 |
attr |
MessageQueue.handle |
1 | 0 | 0 |
attr |
MessageQueue.buffer |
1 | 0 | 0 |
attr |
MessageQueue.local_socket |
1 | 0 | 0 |
attr |
MessageQueue.current_idx |
1 | 0 | 0 |
attr |
MessageQueue.remote_socket |
1 | 0 | 0 |
attr |
from_bytes_big |
1 | 0 | 0 |
meth |
SpinTimer.record_activity |
1 | 0 | 0 |
meth |
SpinTimer.spin |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.device_communicators.shm_object_storage (29 missing, 6 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SingleWriterShmObjectStorage.init |
6 | 5 | 0 |
meth |
SingleWriterShmObjectStorage.put |
3 | 3 | 1 |
meth |
SingleWriterShmObjectStorage.get |
3 | 3 | 1 |
meth |
SingleWriterShmObjectStorage.handle |
1 | 0 | 0 |
attr |
SingleWriterShmObjectStorage.max_object_size |
1 | 0 | 0 |
attr |
SingleWriterShmObjectStorage.n_readers |
1 | 0 | 0 |
attr |
SingleWriterShmObjectStorage.serde_class |
1 | 0 | 0 |
attr |
SingleWriterShmObjectStorage.ser_de |
1 | 0 | 0 |
attr |
SingleWriterShmObjectStorage.ring_buffer |
1 | 0 | 0 |
attr |
SingleWriterShmObjectStorage.is_writer |
1 | 0 | 0 |
attr |
SingleWriterShmObjectStorage.flag_bytes |
1 | 0 | 0 |
meth |
ObjectSerde.serialize |
2 | 2 | 1 |
meth |
ObjectSerde.deserialize |
2 | 2 | 1 |
meth |
MsgpackSerde.init |
1 | 0 | 0 |
meth |
MsgpackSerde.serialize |
2 | 2 | 1 |
meth |
MsgpackSerde.deserialize |
2 | 2 | 1 |
attr |
MsgpackSerde.encoder |
1 | 0 | 0 |
attr |
MsgpackSerde.tensor_decoder |
1 | 0 | 0 |
attr |
MsgpackSerde.mm_decoder |
1 | 0 | 0 |
meth |
SingleWriterShmRingBuffer.init |
4 | 3 | 0 |
meth |
SingleWriterShmRingBuffer.handle |
1 | 0 | 0 |
meth |
SingleWriterShmRingBuffer.del |
1 | 0 | 0 |
meth |
SingleWriterShmRingBuffer.access_buf |
2 | 1 | 0 |
attr |
SingleWriterShmRingBuffer.data_buffer_size |
1 | 0 | 0 |
attr |
SingleWriterShmRingBuffer.is_writer |
1 | 0 | 0 |
attr |
SingleWriterShmRingBuffer.ID_NBYTES |
1 | 0 | 0 |
attr |
SingleWriterShmRingBuffer.ID_MAX |
1 | 0 | 0 |
attr |
SingleWriterShmRingBuffer.SIZE_NBYTES |
1 | 0 | 0 |
attr |
SingleWriterShmRingBuffer.MD_SIZE |
1 | 0 | 0 |
attr |
SingleWriterShmRingBuffer.monotonic_id_end |
1 | 0 | 0 |
attr |
SingleWriterShmRingBuffer.monotonic_id_start |
1 | 0 | 0 |
attr |
SingleWriterShmRingBuffer.data_buffer_start |
1 | 0 | 0 |
attr |
SingleWriterShmRingBuffer.data_buffer_end |
1 | 0 | 0 |
attr |
SingleWriterShmRingBuffer.shared_memory |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.device_communicators.symm_mem (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SymmMemCommunicator.init |
5 | 4 | 0 |
meth |
SymmMemCommunicator.should_use_symm_mem |
2 | 1 | 0 |
attr |
SymmMemCommunicator.disabled |
1 | 0 | 0 |
attr |
SymmMemCommunicator.dtype |
1 | 0 | 0 |
attr |
SymmMemCommunicator.device |
1 | 0 | 0 |
attr |
SymmMemCommunicator.group |
1 | 0 | 0 |
attr |
SymmMemCommunicator.world_size |
1 | 0 | 0 |
attr |
SymmMemCommunicator.device_capability |
1 | 0 | 0 |
attr |
SymmMemCommunicator.force_multimem |
1 | 0 | 0 |
attr |
SymmMemCommunicator.max_size |
1 | 0 | 0 |
attr |
SymmMemCommunicator.buffer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.device_communicators.xpu_communicator (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XpuCommunicator.init |
5 | 4 | 0 |
meth |
XpuCommunicator.all_reduce |
2 | 1 | 0 |
meth |
XpuCommunicator.reduce_scatter |
3 | 2 | 0 |
meth |
XpuCommunicator.reduce_scatterv |
4 | 3 | 0 |
meth |
XpuCommunicator.all_gatherv |
4 | 3 | 0 |
attr |
XpuCommunicator.all2all_manager |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.ec_transfer.ec_connector.base (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
ECConnectorBase.init |
3 | 2 | 0 |
meth |
ECConnectorBase.register_caches |
2 | 1 | 0 |
meth |
ECConnectorBase.start_load_caches |
3 | 2 | 0 |
meth |
ECConnectorBase.save_caches |
4 | 3 | 0 |
meth |
ECConnectorBase.update_state_after_alloc |
3 | 2 | 0 |
meth |
ECConnectorBase.update_connector_output |
2 | 1 | 0 |
vllm.distributed.ec_transfer.ec_connector.example_connector (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ECExampleConnectorMetadata.init |
1 | 0 | 0 |
meth |
ECExampleConnectorMetadata.add_mm_data |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
ECExampleConnector.init |
3 | 2 | 0 |
meth |
ECExampleConnector.start_load_caches |
3 | 1 | 0 |
meth |
ECExampleConnector.save_caches |
4 | 1 | 0 |
meth |
ECExampleConnector._found_match_for_mm_data |
2 | 1 | 0 |
meth |
MMMeta.make_meta |
3 | 1 | 0 |
vllm.distributed.ec_transfer.ec_connector.factory (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.distributed.elastic_ep.elastic_execute (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
ElasticEPScalingExecutor.init |
2 | 0 | 0 |
meth |
ElasticEPScalingExecutor.execute |
4 | 1 | 0 |
prop |
ElasticEPScalingExecutor.worker |
1 | 0 | 0 |
attr |
ElasticEPScalingExecutor.worker_ref |
1 | 0 | 0 |
attr |
ElasticEPScalingExecutor.reconfig_request |
1 | 0 | 0 |
vllm.distributed.elastic_ep.elastic_state (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ElasticEPScalingState.init |
8 | 7 | 0 |
meth |
ElasticEPScalingState._execute_tcp_store_barrier |
6 | 0 | 0 |
meth |
ElasticEPScalingState.handle_notification |
2 | 1 | 0 |
meth |
ElasticEPScalingState._create_standby_groups |
1 | 0 | 0 |
meth |
ElasticEPScalingState._transfer_weights |
1 | 0 | 0 |
meth |
ElasticEPScalingState._transfer_expert_mapping |
1 | 0 | 0 |
meth |
ElasticEPScalingState._sync_kv_cache_memory_size |
1 | 0 | 0 |
meth |
ElasticEPScalingState._switch_and_prepare |
1 | 0 | 0 |
meth |
ElasticEPScalingState._eplb_reshuffle |
1 | 0 | 0 |
meth |
ElasticEPScalingState._eplb_reshuffle_before_scale_down |
1 | 0 | 0 |
meth |
ElasticEPScalingState._switch_and_remove |
1 | 0 | 0 |
meth |
ElasticEPScalingState._update_parallel_config |
1 | 0 | 0 |
attr |
ElasticEPScalingState.model_executor_ref |
1 | 0 | 0 |
attr |
ElasticEPScalingState.engine_core_ref |
1 | 0 | 0 |
attr |
ElasticEPScalingState.vllm_config |
1 | 0 | 0 |
attr |
ElasticEPScalingState.old_dp_group |
1 | 0 | 0 |
attr |
ElasticEPScalingState.old_dp_store |
1 | 0 | 0 |
attr |
ElasticEPScalingState.new_dp_store |
1 | 0 | 0 |
attr |
ElasticEPScalingState.worker_type |
1 | 0 | 0 |
attr |
ElasticEPScalingState.scale_type |
1 | 0 | 0 |
attr |
ElasticEPScalingState.reconfig_request |
1 | 0 | 0 |
attr |
ElasticEPScalingState.state |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.eplb.async_worker (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.distributed.eplb.eplb_state (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EplbState.init |
3 | 2 | 0 |
meth |
EplbState.validate_ep_configuration |
2 | 1 | 0 |
meth |
EplbState.add_model |
3 | 2 | 0 |
meth |
EplbState.start_async_loop |
3 | 2 | 0 |
meth |
EplbState.move_to_workspace |
4 | 3 | 0 |
attr |
EplbState.parallel_config |
1 | 0 | 0 |
attr |
EplbState.device |
1 | 0 | 0 |
attr |
EplbState.rearrange_event |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.eplb.eplb_utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_events (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NullEventPublisher.publish |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
ZmqEventPublisher.END_SEQ |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.factory (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.utils (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
kv_postprocess_layout_on_receive |
3 | 0 | 0 |
meth |
TpKVTopology.post_init |
1 | 0 | 0 |
func |
kv_postprocess_blksize_and_layout_on_receive |
4 | 0 | 0 |
func |
get_kv_connector_cache_layout |
1 | 0 | 0 |
meth |
KVOutputAggregator.init |
2 | 1 | 0 |
meth |
KVOutputAggregator.from_connector |
3 | 2 | 0 |
func |
get_current_attn_backend |
2 | 1 | 0 |
func |
kv_postprocess_blksize_on_receive |
4 | 0 | 0 |
func |
yield_req_data |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.base (10 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KVConnectorBase_V1.init |
4 | 3 | 0 |
meth |
KVConnectorBase_V1.register_kv_caches |
2 | 1 | 0 |
meth |
KVConnectorBase_V1.register_cross_layers_kv_cache |
3 | 2 | 0 |
meth |
KVConnectorBase_V1.set_host_xfer_buffer_ops |
2 | 1 | 0 |
meth |
KVConnectorBase_V1.handle_preemptions |
2 | 1 | 0 |
meth |
KVConnectorBase_V1.start_load_kv |
3 | 3 | 1 |
meth |
KVConnectorBase_V1.save_kv_layer |
5 | 5 | 1 |
meth |
KVConnectorBase_V1.wait_for_save |
1 | 0 | 0 |
meth |
KVConnectorBase_V1.shutdown |
1 | 0 | 0 |
meth |
KVConnectorBase_V1.update_state_after_alloc |
4 | 3 | 0 |
meth |
KVConnectorBase_V1.update_connector_output |
2 | 1 | 0 |
func |
supports_hma |
2 | 2 | 1 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.decode_bench_connector (18 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DecodeBenchConnector.init |
4 | 3 | 0 |
meth |
DecodeBenchConnector.register_kv_caches |
2 | 1 | 0 |
meth |
DecodeBenchConnector.start_load_kv |
3 | 3 | 1 |
meth |
DecodeBenchConnector.save_kv_layer |
5 | 5 | 1 |
meth |
DecodeBenchConnector.wait_for_save |
1 | 0 | 0 |
meth |
DecodeBenchConnector.update_state_after_alloc |
4 | 3 | 0 |
meth |
DecodeBenchConnectorScheduler.init |
2 | 1 | 0 |
meth |
DecodeBenchConnectorScheduler.update_state_after_alloc |
4 | 3 | 0 |
meth |
DecodeBenchConnectorScheduler.request_finished |
2 | 1 | 0 |
attr |
DecodeBenchConnectorScheduler.vllm_config |
1 | 0 | 0 |
attr |
DecodeBenchConnectorScheduler.block_size |
1 | 0 | 0 |
meth |
DecodeBenchConnectorWorker.init |
2 | 1 | 0 |
meth |
DecodeBenchConnectorWorker.register_kv_caches |
2 | 1 | 0 |
meth |
DecodeBenchConnectorWorker.start_fill_kv |
2 | 1 | 0 |
meth |
DecodeBenchConnectorWorker._fill_blocks |
4 | 3 | 0 |
attr |
DecodeBenchConnectorWorker.vllm_config |
1 | 0 | 0 |
attr |
DecodeBenchConnectorWorker.block_size |
1 | 0 | 0 |
attr |
DecodeBenchConnectorWorker.fill_mean |
1 | 0 | 0 |
attr |
DecodeBenchConnectorWorker.fill_std |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.example_connector (6 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ExampleConnector.init |
4 | 3 | 0 |
meth |
ExampleConnector.start_load_kv |
3 | 3 | 1 |
meth |
ExampleConnector.save_kv_layer |
5 | 5 | 1 |
meth |
ExampleConnector.wait_for_save |
1 | 0 | 0 |
meth |
ExampleConnector.update_state_after_alloc |
4 | 3 | 0 |
meth |
ExampleConnector._generate_foldername_debug |
4 | 3 | 0 |
func |
align_to_block_size |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.example_hidden_states_connector (7 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ExampleHiddenStatesConnector.init |
4 | 3 | 0 |
meth |
ExampleHiddenStatesConnector.start_load_kv |
3 | 2 | 1 |
meth |
ExampleHiddenStatesConnector.wait_for_save |
1 | 0 | 0 |
meth |
ExampleHiddenStatesConnector.register_kv_caches |
2 | 1 | 0 |
meth |
ExampleHiddenStatesConnector.save_kv_layer |
5 | 5 | 1 |
meth |
ExampleHiddenStatesConnector.update_state_after_alloc |
4 | 3 | 0 |
attr |
ExampleHiddenStatesConnector.num_hidden_states |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.lmcache_connector (6 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
LMCacheConnectorV1.init |
4 | 3 | 0 |
meth |
LMCacheConnectorV1.register_kv_caches |
2 | 1 | 0 |
meth |
LMCacheConnectorV1.start_load_kv |
3 | 3 | 1 |
meth |
LMCacheConnectorV1.save_kv_layer |
5 | 5 | 1 |
meth |
LMCacheConnectorV1.wait_for_save |
1 | 0 | 0 |
meth |
LMCacheConnectorV1.update_state_after_alloc |
4 | 3 | 0 |
meth |
LMCacheConnectorV1.update_connector_output |
2 | 1 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.lmcache_integration.multi_process_adapter (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LMCacheMPWorkerAdapter.init |
7 | 6 | 0 |
meth |
LMCacheMPWorkerAdapter.register_kv_caches |
2 | 1 | 0 |
meth |
LMCacheMPWorkerAdapter.submit_store_request |
4 | 3 | 0 |
meth |
LMCacheMPWorkerAdapter.submit_retrieve_request |
4 | 3 | 0 |
meth |
LMCacheMPWorkerAdapter.batched_submit_store_requests |
4 | 3 | 0 |
meth |
LMCacheMPWorkerAdapter.batched_submit_retrieve_requests |
4 | 3 | 0 |
meth |
LMCacheMPWorkerAdapter.shutdown |
1 | 0 | 0 |
attr |
LMCacheMPWorkerAdapter.mq_client |
1 | 0 | 0 |
attr |
LMCacheMPWorkerAdapter.instance_id |
1 | 0 | 0 |
attr |
LMCacheMPWorkerAdapter.model_name |
1 | 0 | 0 |
attr |
LMCacheMPWorkerAdapter.world_size |
1 | 0 | 0 |
attr |
LMCacheMPWorkerAdapter.worker_id |
1 | 0 | 0 |
attr |
LMCacheMPWorkerAdapter.blocks_in_chunk |
1 | 0 | 0 |
meth |
LMCacheMPSchedulerAdapter.init |
7 | 6 | 0 |
attr |
LMCacheMPSchedulerAdapter.mq_client |
1 | 0 | 0 |
attr |
LMCacheMPSchedulerAdapter.model_name |
1 | 0 | 0 |
attr |
LMCacheMPSchedulerAdapter.world_size |
1 | 0 | 0 |
attr |
LMCacheMPSchedulerAdapter.worker_id |
1 | 0 | 0 |
attr |
LMCacheMPSchedulerAdapter.chunk_size |
1 | 0 | 0 |
attr |
LMCacheMPSchedulerAdapter.blocks_in_chunk |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.lmcache_integration.utils (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
create_lmcache_metadata |
5 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.lmcache_integration.vllm_v1_adapter (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
need_gpu_interm_buffer |
2 | 1 | 0 |
meth |
LMCacheConnectorV1Impl.init |
4 | 3 | 0 |
meth |
LMCacheConnectorV1Impl._init_kv_caches_from_forward_context |
2 | 1 | 0 |
meth |
LMCacheConnectorV1Impl.register_kv_caches |
2 | 1 | 0 |
meth |
LMCacheConnectorV1Impl.start_load_kv |
3 | 2 | 0 |
meth |
LMCacheConnectorV1Impl.save_kv_layer |
5 | 4 | 0 |
meth |
LMCacheConnectorV1Impl.wait_for_save |
1 | 0 | 0 |
meth |
LMCacheConnectorV1Impl.update_state_after_alloc |
3 | 2 | 0 |
attr |
LMCacheConnectorV1Impl.kv_role |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.worker_count |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.config |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.async_loading |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.skip_last_n_tokens |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.num_layers |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.current_layer |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.force_skip_save |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.lookup_client |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.lmcache_engine |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.api_server |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.plugin_launcher |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.use_layerwise |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.enable_blending |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.lookup_server |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.offload_server |
1 | 0 | 0 |
attr |
LMCacheConnectorV1Impl.blender |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.lmcache_mp_connector (19 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LMCacheMPRequestTracker.init |
2 | 1 | 0 |
meth |
LMCacheMPRequestTracker.increase_num_scheduled_tokens |
2 | 1 | 0 |
meth |
LMCacheMPRequestTracker.increase_num_stored_blocks |
2 | 1 | 0 |
meth |
LMCacheMPRequestTracker.append_block_ids |
2 | 1 | 0 |
meth |
LMCacheMPConnectorMetadata.init |
1 | 0 | 0 |
meth |
LMCacheMPConnectorMetadata.add_request_metadata |
2 | 1 | 0 |
meth |
LMCacheMPConnectorMetadata.len |
1 | 0 | 0 |
meth |
LMCacheMPConnectorMetadata.str |
1 | 0 | 0 |
meth |
LMCacheMPConnectorMetadata.repr |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
LMCacheMPConnector.init |
4 | 3 | 0 |
meth |
LMCacheMPConnector.register_kv_caches |
2 | 1 | 0 |
meth |
LMCacheMPConnector.start_load_kv |
3 | 3 | 1 |
meth |
LMCacheMPConnector.save_kv_layer |
5 | 5 | 1 |
meth |
LMCacheMPConnector.wait_for_save |
1 | 0 | 0 |
meth |
LMCacheMPConnector.shutdown |
1 | 0 | 0 |
meth |
LMCacheMPConnector.update_state_after_alloc |
4 | 3 | 0 |
meth |
LMCacheMPConnector.update_connector_output |
2 | 1 | 0 |
attr |
LMCacheMPConnector.vllm_block_size |
1 | 0 | 0 |
attr |
LMCacheMPConnector.scheduler_adapter |
1 | 0 | 0 |
attr |
LMCacheMPConnector.worker_adapter |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.metrics (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KVConnectorPromMetrics.init |
5 | 4 | 0 |
meth |
KVConnectorPromMetrics.observe |
3 | 2 | 0 |
attr |
KVConnectorPromMetrics.per_engine_labelvalues |
1 | 0 | 0 |
meth |
KVConnectorLogging.init |
2 | 1 | 0 |
meth |
KVConnectorLogging.reset |
1 | 0 | 0 |
meth |
KVConnectorLogging.observe |
2 | 1 | 0 |
meth |
KVConnectorLogging.log |
2 | 0 | 0 |
attr |
KVConnectorLogging.connector_cls |
1 | 0 | 0 |
meth |
KVConnectorStats.reset |
1 | 0 | 0 |
meth |
KVConnectorPrometheus.init |
4 | 3 | 0 |
meth |
KVConnectorPrometheus.observe |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.mooncake.mooncake_connector (52 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MooncakeConnectorMetadata.init |
1 | 0 | 0 |
meth |
MooncakeConnectorMetadata.add_new_req |
5 | 4 | 0 |
meth |
MooncakeConnectorWorker.init |
3 | 2 | 0 |
meth |
MooncakeConnectorWorker.del |
1 | 0 | 0 |
meth |
MooncakeConnectorWorker.shutdown |
1 | 0 | 0 |
meth |
MooncakeConnectorWorker.register_worker_with_bootstrap |
1 | 0 | 0 |
meth |
MooncakeConnectorWorker._mooncake_sender_listener |
2 | 1 | 0 |
meth |
MooncakeConnectorWorker._sender_worker |
2 | 1 | 0 |
meth |
MooncakeConnectorWorker.send_kv_to_decode |
4 | 3 | 0 |
meth |
MooncakeConnectorWorker.resolve_need_send |
3 | 2 | 0 |
meth |
MooncakeConnectorWorker.register_kv_caches |
2 | 1 | 0 |
meth |
MooncakeConnectorWorker.receive_kv_from_single_worker |
3 | 2 | 0 |
meth |
MooncakeConnectorWorker.process_pulling_result |
3 | 2 | 0 |
meth |
MooncakeConnectorWorker._connect_to_prefiller_bootstrap |
2 | 1 | 0 |
meth |
MooncakeConnectorWorker.receive_kv |
3 | 2 | 0 |
meth |
MooncakeConnectorWorker.handle_new_engine_id |
3 | 2 | 0 |
meth |
MooncakeConnectorWorker._start_load_kv |
2 | 1 | 0 |
meth |
MooncakeConnectorWorker.record_send_reqs |
2 | 1 | 0 |
meth |
MooncakeConnectorWorker.start_load_kv |
2 | 1 | 0 |
attr |
MooncakeConnectorWorker.vllm_config |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.engine |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.hostname |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.num_sender_workers |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.num_sender_tasks |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.rpc_port |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.tp_rank |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.tp_size |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.num_blocks |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.dp_rank |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.pp_rank |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.block_size |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.model_config |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.cache_config |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.use_mla |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.backend_name |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.kv_cache_layout |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.kv_topo |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.async_zmq_ctx |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.sender_worker_queue |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.sender_loop |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.receiver_loop |
1 | 0 | 0 |
attr |
MooncakeConnectorWorker.bootstrap_server |
1 | 0 | 0 |
meth |
MooncakeConnectorScheduler.init |
3 | 2 | 0 |
meth |
MooncakeConnectorScheduler.update_state_after_alloc |
4 | 3 | 0 |
attr |
MooncakeConnectorScheduler.vllm_config |
1 | 0 | 0 |
meth |
MooncakeConnector.init |
4 | 3 | 0 |
meth |
MooncakeConnector.update_state_after_alloc |
4 | 3 | 0 |
meth |
MooncakeConnector.register_kv_caches |
2 | 1 | 0 |
meth |
MooncakeConnector.start_load_kv |
3 | 2 | 0 |
meth |
MooncakeConnector.save_kv_layer |
5 | 4 | 0 |
meth |
MooncakeConnector.wait_for_save |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.mooncake.mooncake_utils (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MooncakeBootstrapServer.init |
4 | 3 | 0 |
meth |
MooncakeBootstrapServer.del |
1 | 0 | 0 |
meth |
MooncakeBootstrapServer._register_routes |
1 | 0 | 0 |
meth |
MooncakeBootstrapServer.start |
1 | 0 | 0 |
meth |
MooncakeBootstrapServer.shutdown |
1 | 0 | 0 |
meth |
MooncakeBootstrapServer.register_worker |
2 | 1 | 0 |
attr |
MooncakeBootstrapServer.host |
1 | 0 | 0 |
attr |
MooncakeBootstrapServer.port |
1 | 0 | 0 |
attr |
MooncakeBootstrapServer.app |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.moriio.moriio_common (7 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
zmq_ctx |
3 | 3 | 1 |
attr |
logger |
1 | 0 | 0 |
func |
set_role |
2 | 1 | 0 |
attr |
RoleManager._lock |
1 | 0 | 0 |
meth |
MoRIIOConnectorMetadata.init |
1 | 0 | 0 |
meth |
MoRIIOConnectorMetadata.repr |
1 | 0 | 0 |
meth |
MoRIIOConnectorMetadata.add_new_req |
5 | 3 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.moriio.moriio_connector (84 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoRIIOConnectorWorker.init |
3 | 2 | 0 |
meth |
MoRIIOConnectorWorker._get_built_session |
2 | 0 | 0 |
meth |
MoRIIOConnectorWorker._ping |
2 | 0 | 0 |
meth |
MoRIIOConnectorWorker.shutdown |
1 | 0 | 0 |
meth |
MoRIIOConnectorWorker.del |
1 | 0 | 0 |
meth |
MoRIIOConnectorWorker._moriio_handshake_listener |
7 | 6 | 0 |
meth |
MoRIIOConnectorWorker._background_moriio_handshake |
4 | 3 | 0 |
meth |
MoRIIOConnectorWorker.register_kv_caches |
2 | 1 | 0 |
meth |
MoRIIOConnectorWorker.save_kv_layer |
6 | 4 | 0 |
meth |
MoRIIOConnectorWorker.get_engine_name_with_dp |
3 | 0 | 0 |
meth |
MoRIIOConnectorWorker.start_load_kv |
2 | 1 | 0 |
meth |
MoRIIOConnectorWorker._read_blocks_for_req |
3 | 2 | 0 |
meth |
MoRIIOConnectorWorker._write_blocks_for_req |
5 | 2 | 0 |
meth |
MoRIIOConnectorWorker._is_last_layer |
2 | 0 | 0 |
attr |
MoRIIOConnectorWorker.moriio_config |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.mode |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.vllm_config |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.kv_transfer_config |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.is_producer |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.tp_rank |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.dp_rank |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.local_ip |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.local_kv_port |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.proxy_ip |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.local_ping_port |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.proxy_ping_port |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.http_port |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.handshake_port |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.notify_port |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.zmq_context |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.metadata_address |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.request_address |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.moriio_engine |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.moriio_wrapper |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.slot_size_bytes |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.kv_cache_shape |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.block_shape |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.kv_element_size |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.world_size |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.tp_group |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.num_regions |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.num_layers |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.block_size |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.model_config |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.cache_config |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.use_mla |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.built_session |
1 | 0 | 0 |
attr |
MoRIIOConnectorWorker.backend_name |
1 | 0 | 0 |
meth |
MoRIIOConnector.init |
4 | 3 | 0 |
meth |
MoRIIOConnector._set_port_defaults |
2 | 1 | 0 |
meth |
MoRIIOConnector.update_state_after_alloc |
4 | 3 | 0 |
meth |
MoRIIOConnector.register_kv_caches |
2 | 1 | 0 |
meth |
MoRIIOConnector.start_load_kv |
3 | 2 | 0 |
meth |
MoRIIOConnector.save_kv_layer |
5 | 4 | 0 |
meth |
MoRIIOConnector.wait_for_save |
1 | 0 | 0 |
meth |
MoRIIOConnector.shutdown |
1 | 0 | 0 |
attr |
MoRIIOConnector.kv_transfer_config |
1 | 0 | 0 |
attr |
MoRIIOConnector.engine_id |
1 | 0 | 0 |
attr |
MoRIIOConnector.mode |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
MoRIIOConnectorScheduler.init |
3 | 2 | 0 |
meth |
MoRIIOConnectorScheduler.send_notify_block |
5 | 2 | 0 |
meth |
MoRIIOConnectorScheduler.update_state_after_alloc |
5 | 4 | 0 |
meth |
MoRIIOConnectorScheduler.shutdown |
1 | 0 | 0 |
attr |
MoRIIOConnectorScheduler.vllm_config |
1 | 0 | 0 |
attr |
MoRIIOConnectorScheduler.kv_transfer_config |
1 | 0 | 0 |
attr |
MoRIIOConnectorScheduler.block_size |
1 | 0 | 0 |
attr |
MoRIIOConnectorScheduler.mode |
1 | 0 | 0 |
attr |
MoRIIOConnectorScheduler.host_ip |
1 | 0 | 0 |
attr |
MoRIIOConnectorScheduler.handshake_port |
1 | 0 | 0 |
attr |
MoRIIOConnectorScheduler.side_notify_port |
1 | 0 | 0 |
attr |
MoRIIOConnectorScheduler.tp_size |
1 | 0 | 0 |
attr |
MoRIIOConnectorScheduler.dp_rank |
1 | 0 | 0 |
attr |
MoRIIOConnectorScheduler.is_producer |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.moriio.moriio_engine (50 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoRIIOWrapper.init |
4 | 3 | 0 |
meth |
MoRIIOWrapper.set_moriio_engine |
2 | 0 | 0 |
meth |
MoRIIOWrapper.set_backend_type |
2 | 0 | 0 |
meth |
MoRIIOWrapper.get_agent_metadata |
1 | 0 | 0 |
meth |
MoRIIOWrapper.register_remote_engine |
2 | 0 | 0 |
meth |
MoRIIOWrapper.register_local_tensor |
2 | 1 | 0 |
meth |
MoRIIOWrapper.get_unpack_memory_metadata |
2 | 0 | 0 |
meth |
MoRIIOWrapper.build_session |
3 | 0 | 0 |
meth |
MoRIIOWrapper.read_remote_data |
5 | 0 | 0 |
meth |
MoRIIOWrapper.write_remote_data |
5 | 0 | 0 |
meth |
MoRIIOWrapper.write_remote_data_single |
5 | 0 | 0 |
meth |
MoRIIOWrapper.waiting_for_transfer_complete |
1 | 0 | 0 |
meth |
MoRIIOWrapper.async_wait_reqid |
1 | 0 | 0 |
meth |
MoRIIOWrapper._handle_message |
2 | 1 | 0 |
meth |
MoRIIOWrapper._handle_structured_message |
2 | 1 | 0 |
meth |
MoRIIOWrapper._handle_completion_message |
2 | 1 | 0 |
meth |
MoRIIOWrapper.send_notify |
4 | 0 | 0 |
meth |
MoRIIOWrapper.pop_finished_req_ids |
1 | 0 | 0 |
meth |
MoRIIOWrapper.pop_finished_write_req_ids |
1 | 0 | 0 |
meth |
MoRIIOWrapper.shutdown |
1 | 0 | 0 |
attr |
MoRIIOWrapper.tp_rank |
1 | 0 | 0 |
attr |
MoRIIOWrapper.dp_rank |
1 | 0 | 0 |
attr |
MoRIIOWrapper.moriio_engine |
1 | 0 | 0 |
attr |
MoRIIOWrapper.remote_memory_metadata |
1 | 0 | 0 |
attr |
MoRIIOWrapper.local_memory_registered |
1 | 0 | 0 |
attr |
MoRIIOWrapper.local_memory_metadata |
1 | 0 | 0 |
attr |
MoRIIOWrapper.lock |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
MoRIIOWriter.init |
2 | 1 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.multi_connector (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MultiKVConnectorStats.reset |
1 | 0 | 0 |
meth |
MultiKVConnectorStats.setitem |
3 | 2 | 0 |
meth |
MultiConnector.init |
4 | 3 | 0 |
meth |
MultiConnector.register_cross_layers_kv_cache |
3 | 2 | 0 |
meth |
MultiConnector.register_kv_caches |
2 | 1 | 0 |
meth |
MultiConnector.shutdown |
1 | 0 | 0 |
meth |
MultiConnector.start_load_kv |
3 | 2 | 0 |
meth |
MultiConnector.save_kv_layer |
5 | 4 | 0 |
meth |
MultiConnector.wait_for_save |
1 | 0 | 0 |
meth |
MultiConnector.set_host_xfer_buffer_ops |
2 | 1 | 0 |
meth |
MultiConnector.handle_preemptions |
2 | 1 | 0 |
meth |
MultiConnector.update_state_after_alloc |
4 | 3 | 0 |
meth |
MultiConnector.update_connector_output |
2 | 1 | 0 |
meth |
MultiKVConnectorPromMetrics.init |
6 | 5 | 0 |
meth |
MultiKVConnectorPromMetrics.observe |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector (85 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NixlPromMetrics.init |
5 | 4 | 0 |
meth |
NixlPromMetrics.observe |
3 | 2 | 0 |
attr |
NixlPromMetrics.nixl_histogram_xfer_time |
1 | 0 | 0 |
attr |
NixlPromMetrics.nixl_histogram_post_time |
1 | 0 | 0 |
attr |
NixlPromMetrics.nixl_histogram_bytes_transferred |
1 | 0 | 0 |
attr |
NixlPromMetrics.nixl_histogram_num_descriptors |
1 | 0 | 0 |
attr |
NixlPromMetrics.counter_nixl_num_failed_transfers |
1 | 0 | 0 |
attr |
NixlPromMetrics.counter_nixl_num_failed_notifications |
1 | 0 | 0 |
attr |
NixlPromMetrics.counter_nixl_num_kv_expired_reqs |
1 | 0 | 0 |
func |
zmq_ctx |
3 | 3 | 1 |
meth |
NixlKVConnectorStats.post_init |
1 | 0 | 0 |
meth |
NixlKVConnectorStats.reset |
1 | 0 | 0 |
meth |
NixlKVConnectorStats.record_transfer |
2 | 1 | 0 |
meth |
NixlKVConnectorStats.record_failed_transfer |
1 | 0 | 0 |
meth |
NixlKVConnectorStats.record_failed_notification |
1 | 0 | 0 |
meth |
NixlKVConnectorStats.record_kv_expired_req |
1 | 0 | 0 |
meth |
NixlConnector.init |
4 | 3 | 0 |
meth |
NixlConnector.get_required_kvcache_layout |
2 | 1 | 0 |
meth |
NixlConnector.update_state_after_alloc |
4 | 3 | 0 |
meth |
NixlConnector.register_kv_caches |
2 | 1 | 0 |
meth |
NixlConnector.register_cross_layers_kv_cache |
3 | 2 | 0 |
meth |
NixlConnector.set_host_xfer_buffer_ops |
2 | 1 | 0 |
meth |
NixlConnector.start_load_kv |
3 | 2 | 0 |
meth |
NixlConnector.save_kv_layer |
5 | 4 | 0 |
meth |
NixlConnector.wait_for_save |
1 | 0 | 0 |
meth |
NixlConnector.shutdown |
1 | 0 | 0 |
attr |
NixlConnector.kv_transfer_config |
1 | 0 | 0 |
meth |
NixlConnectorMetadata.init |
1 | 0 | 0 |
meth |
NixlConnectorMetadata.add_new_req_to_save |
4 | 3 | 0 |
meth |
NixlConnectorMetadata.add_new_req_to_recv |
4 | 3 | 0 |
meth |
NixlConnectorWorker.init |
3 | 2 | 0 |
meth |
NixlConnectorWorker.set_host_xfer_buffer_ops |
2 | 1 | 0 |
meth |
NixlConnectorWorker._log_failure |
7 | 5 | 0 |
meth |
NixlConnectorWorker._background_nixl_handshake |
4 | 3 | 0 |
meth |
NixlConnectorWorker.register_kv_caches |
2 | 1 | 0 |
meth |
NixlConnectorWorker._validate_remote_agent_handshake |
3 | 2 | 0 |
meth |
NixlConnectorWorker.sync_recved_kv_to_device |
3 | 2 | 0 |
meth |
NixlConnectorWorker.save_kv_to_host |
2 | 1 | 0 |
meth |
NixlConnectorWorker.post_process_device_kv_on_receive |
3 | 2 | 0 |
meth |
NixlConnectorWorker._handle_failed_transfer |
3 | 2 | 0 |
meth |
NixlConnectorWorker.start_load_kv |
2 | 1 | 0 |
meth |
NixlConnectorWorker._read_blocks_for_req |
3 | 2 | 0 |
meth |
NixlConnectorWorker._read_blocks |
9 | 8 | 0 |
meth |
NixlConnectorWorker.get_mapped_blocks |
3 | 0 | 0 |
meth |
NixlConnectorWorker.del |
1 | 0 | 0 |
meth |
NixlConnectorWorker.shutdown |
1 | 0 | 0 |
attr |
NixlConnectorWorker.vllm_config |
1 | 0 | 0 |
attr |
NixlConnectorWorker.block_size |
1 | 0 | 0 |
attr |
NixlConnectorWorker.kv_transfer_config |
1 | 0 | 0 |
attr |
NixlConnectorWorker.nixl_backends |
1 | 0 | 0 |
attr |
NixlConnectorWorker.nixl_wrapper |
1 | 0 | 0 |
attr |
NixlConnectorWorker.tp_rank |
1 | 0 | 0 |
attr |
NixlConnectorWorker.world_size |
1 | 0 | 0 |
attr |
NixlConnectorWorker.tp_group |
1 | 0 | 0 |
attr |
NixlConnectorWorker.num_blocks |
1 | 0 | 0 |
attr |
NixlConnectorWorker.enable_permute_local_kv |
1 | 0 | 0 |
attr |
NixlConnectorWorker.device_type |
1 | 0 | 0 |
attr |
NixlConnectorWorker.nixl_memory_type |
1 | 0 | 0 |
attr |
NixlConnectorWorker.kv_caches_base_addr |
1 | 0 | 0 |
attr |
NixlConnectorWorker.num_regions |
1 | 0 | 0 |
attr |
NixlConnectorWorker.num_layers |
1 | 0 | 0 |
attr |
NixlConnectorWorker.dst_xfer_side_handles |
1 | 0 | 0 |
attr |
NixlConnectorWorker.model_config |
1 | 0 | 0 |
attr |
NixlConnectorWorker.cache_config |
1 | 0 | 0 |
attr |
NixlConnectorWorker.use_mla |
1 | 0 | 0 |
attr |
NixlConnectorWorker.attn_backend |
1 | 0 | 0 |
attr |
NixlConnectorWorker.backend_name |
1 | 0 | 0 |
attr |
NixlConnectorWorker.kv_cache_layout |
1 | 0 | 0 |
attr |
NixlConnectorWorker.host_buffer_kv_cache_layout |
1 | 0 | 0 |
attr |
NixlConnectorWorker.consumer_notification_counts_by_req |
1 | 0 | 0 |
attr |
NixlConnectorWorker.xfer_stats |
1 | 0 | 0 |
attr |
NixlConnectorWorker.enforce_compat_hash |
1 | 0 | 0 |
attr |
NixlConnectorWorker.use_host_buffer |
1 | 0 | 0 |
meth |
NixlConnectorScheduler.init |
3 | 2 | 0 |
meth |
NixlConnectorScheduler.shutdown |
1 | 0 | 0 |
meth |
NixlConnectorScheduler._nixl_handshake_listener |
5 | 4 | 0 |
meth |
NixlConnectorScheduler.update_state_after_alloc |
4 | 3 | 0 |
attr |
NixlConnectorScheduler.vllm_config |
1 | 0 | 0 |
attr |
NixlConnectorScheduler.block_size |
1 | 0 | 0 |
attr |
NixlConnectorScheduler.side_channel_host |
1 | 0 | 0 |
attr |
NixlConnectorScheduler.side_channel_port |
1 | 0 | 0 |
attr |
NixlConnectorScheduler.use_host_buffer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.offloading_connector (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
OffloadingConnectorScheduler.init |
2 | 1 | 0 |
meth |
OffloadingConnectorScheduler.update_state_after_alloc |
4 | 3 | 0 |
meth |
OffloadingConnectorScheduler._get_reqs_to_store |
2 | 1 | 0 |
meth |
OffloadingConnectorScheduler.update_connector_output |
2 | 1 | 0 |
attr |
OffloadingConnectorScheduler.gpu_block_size |
1 | 0 | 0 |
attr |
OffloadingConnectorScheduler.offloaded_block_size |
1 | 0 | 0 |
attr |
OffloadingConnectorScheduler.block_size_factor |
1 | 0 | 0 |
meth |
OffloadingConnector.init |
4 | 3 | 0 |
meth |
OffloadingConnector.register_kv_caches |
2 | 1 | 0 |
meth |
OffloadingConnector.register_cross_layers_kv_cache |
3 | 2 | 0 |
meth |
OffloadingConnector.handle_preemptions |
2 | 1 | 0 |
meth |
OffloadingConnector.start_load_kv |
3 | 2 | 0 |
meth |
OffloadingConnector.save_kv_layer |
5 | 4 | 0 |
meth |
OffloadingConnector.wait_for_save |
1 | 0 | 0 |
meth |
OffloadingConnector.update_state_after_alloc |
4 | 3 | 0 |
meth |
OffloadingConnector.update_connector_output |
2 | 1 | 0 |
meth |
OffloadingConnectorWorker.init |
2 | 1 | 0 |
meth |
OffloadingConnectorWorker._register_handlers |
3 | 2 | 0 |
meth |
OffloadingConnectorWorker.register_kv_caches |
2 | 1 | 0 |
meth |
OffloadingConnectorWorker.register_cross_layers_kv_cache |
3 | 2 | 0 |
meth |
OffloadingConnectorWorker.handle_preemptions |
2 | 1 | 0 |
meth |
OffloadingConnectorWorker.start_kv_transfers |
2 | 1 | 0 |
meth |
OffloadingConnectorWorker.prepare_store_kv |
2 | 1 | 0 |
attr |
OffloadingConnectorWorker.spec |
1 | 0 | 0 |
attr |
OffloadingConnectorWorker.worker |
1 | 0 | 0 |
attr |
OffloadingConnectorWorker.kv_connector_stats |
1 | 0 | 0 |
meth |
OffloadingConnectorStats.post_init |
1 | 0 | 0 |
meth |
OffloadingConnectorStats.reset |
1 | 0 | 0 |
meth |
OffloadingConnectorStats.record_transfer |
4 | 3 | 0 |
meth |
OffloadPromMetrics.init |
5 | 4 | 0 |
meth |
OffloadPromMetrics.observe |
3 | 2 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.p2p.p2p_nccl_connector (12 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
P2pNcclConnectorMetadata.init |
1 | 0 | 0 |
meth |
P2pNcclConnector.init |
4 | 3 | 0 |
meth |
P2pNcclConnector.start_load_kv |
3 | 3 | 1 |
meth |
P2pNcclConnector.save_kv_layer |
5 | 5 | 1 |
meth |
P2pNcclConnector.wait_for_save |
1 | 0 | 0 |
meth |
P2pNcclConnector.get_finished |
3 | 3 | 1 |
meth |
P2pNcclConnector.update_state_after_alloc |
4 | 3 | 0 |
meth |
P2pNcclConnector.parse_request_id |
3 | 2 | 0 |
meth |
P2pNcclConnector.check_tensors_except_dim |
4 | 0 | 0 |
attr |
P2pNcclConnector.is_producer |
1 | 0 | 0 |
attr |
P2pNcclConnector.p2p_nccl_engine |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.p2p.p2p_nccl_engine (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
P2pNcclEngine.create_connect |
2 | 1 | 0 |
meth |
P2pNcclEngine.listen_for_requests |
1 | 0 | 0 |
meth |
P2pNcclEngine.have_sent_tensor_id |
2 | 1 | 0 |
meth |
P2pNcclEngine.have_received_tensor_id |
2 | 1 | 0 |
meth |
P2pNcclEngine.send_async |
1 | 0 | 0 |
meth |
P2pNcclEngine.wait_for_sent |
1 | 0 | 0 |
meth |
P2pNcclEngine.get_finished |
3 | 2 | 0 |
meth |
P2pNcclEngine.ping |
1 | 0 | 0 |
meth |
P2pNcclEngine.send |
5 | 2 | 0 |
meth |
P2pNcclEngine.recv |
5 | 2 | 0 |
attr |
P2pNcclEngine.config |
1 | 0 | 0 |
attr |
P2pNcclEngine.rank |
1 | 0 | 0 |
attr |
P2pNcclEngine.local_rank |
1 | 0 | 0 |
attr |
P2pNcclEngine.device |
1 | 0 | 0 |
attr |
P2pNcclEngine.nccl |
1 | 0 | 0 |
attr |
P2pNcclEngine.zmq_address |
1 | 0 | 0 |
attr |
P2pNcclEngine.context |
1 | 0 | 0 |
attr |
P2pNcclEngine.router_socket |
1 | 0 | 0 |
attr |
P2pNcclEngine.poller |
1 | 0 | 0 |
attr |
P2pNcclEngine.send_store_cv |
1 | 0 | 0 |
attr |
P2pNcclEngine.send_queue_cv |
1 | 0 | 0 |
attr |
P2pNcclEngine.recv_store_cv |
1 | 0 | 0 |
attr |
P2pNcclEngine.send_stream |
1 | 0 | 0 |
attr |
P2pNcclEngine.recv_stream |
1 | 0 | 0 |
attr |
P2pNcclEngine.pool |
1 | 0 | 0 |
attr |
P2pNcclEngine.send_type |
1 | 0 | 0 |
attr |
P2pNcclEngine.buffer_size |
1 | 0 | 0 |
attr |
P2pNcclEngine.buffer_size_threshold |
1 | 0 | 0 |
attr |
P2pNcclEngine.nccl_num_channels |
1 | 0 | 0 |
attr |
P2pNcclEngine.proxy_address |
1 | 0 | 0 |
attr |
P2pNcclEngine.http_address |
1 | 0 | 0 |
func |
set_p2p_nccl_context |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.distributed.kv_transfer.kv_connector.v1.p2p.tensor_memory_pool (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
TensorMemoryPool.init |
3 | 2 | 0 |
meth |
TensorMemoryPool._initialize_free_lists |
1 | 0 | 0 |
meth |
TensorMemoryPool._allocate_pinned_memory |
1 | 0 | 0 |
meth |
TensorMemoryPool._split_block |
3 | 2 | 0 |
meth |
TensorMemoryPool.free |
2 | 1 | 0 |
meth |
TensorMemoryPool._merge_buddies |
2 | 1 | 0 |
meth |
TensorMemoryPool.cleanup |
1 | 0 | 0 |
meth |
TensorMemoryPool.del |
1 | 0 | 0 |
attr |
TensorMemoryPool.max_block_size |
1 | 0 | 0 |
attr |
TensorMemoryPool.min_block_size |
1 | 0 | 0 |
vllm.distributed.parallel_state (38 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
model_parallel_is_initialized |
1 | 0 | 0 |
func |
destroy_distributed_environment |
1 | 0 | 0 |
func |
graph_capture |
2 | 1 | 0 |
func |
patch_tensor_parallel_group |
2 | 1 | 0 |
func |
set_custom_all_reduce |
2 | 1 | 0 |
func |
init_distributed_environment |
7 | 6 | 0 |
meth |
GroupCoordinator.init |
7 | 6 | 0 |
meth |
GroupCoordinator.create_mq_broadcaster |
4 | 0 | 0 |
meth |
GroupCoordinator.create_single_reader_mq_broadcasters |
3 | 0 | 0 |
meth |
GroupCoordinator.graph_capture |
2 | 1 | 0 |
meth |
GroupCoordinator.all_gatherv |
4 | 3 | 0 |
meth |
GroupCoordinator.broadcast |
3 | 2 | 0 |
meth |
GroupCoordinator.broadcast_object |
3 | 2 | 0 |
meth |
GroupCoordinator.broadcast_object_list |
4 | 3 | 0 |
meth |
GroupCoordinator.send_object |
3 | 3 | 1 |
meth |
GroupCoordinator.recv_object |
2 | 2 | 1 |
meth |
GroupCoordinator.barrier |
1 | 0 | 0 |
meth |
GroupCoordinator.destroy |
1 | 0 | 0 |
meth |
GroupCoordinator.prepare_communication_buffer_for_model |
2 | 1 | 0 |
meth |
GroupCoordinator.combine |
3 | 2 | 0 |
prop |
GroupCoordinator.first_rank |
1 | 0 | 0 |
prop |
GroupCoordinator.last_rank |
1 | 0 | 0 |
prop |
GroupCoordinator.is_first_rank |
1 | 0 | 0 |
prop |
GroupCoordinator.is_last_rank |
1 | 0 | 0 |
prop |
GroupCoordinator.next_rank |
1 | 0 | 0 |
prop |
GroupCoordinator.prev_rank |
1 | 0 | 0 |
attr |
GroupCoordinator.unique_name |
1 | 0 | 0 |
attr |
GroupCoordinator.use_device_communicator |
1 | 0 | 0 |
attr |
GroupCoordinator.use_custom_op_call |
1 | 0 | 0 |
attr |
GroupCoordinator.use_cpu_custom_send_recv |
1 | 0 | 0 |
attr |
GroupCoordinator.device |
1 | 0 | 0 |
func |
cleanup_dist_env_and_memory |
2 | 1 | 0 |
func |
prepare_communication_buffer_for_model |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
destroy_model_parallel |
1 | 0 | 0 |
vllm.distributed.stateless_coordinator (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
StatelessGroupCoordinator.init |
11 | 10 | 0 |
meth |
StatelessGroupCoordinator.destroy |
1 | 0 | 0 |
meth |
StatelessGroupCoordinator.broadcast |
3 | 2 | 0 |
meth |
StatelessGroupCoordinator.broadcast_object |
3 | 1 | 0 |
meth |
StatelessGroupCoordinator.broadcast_object_list |
4 | 3 | 0 |
meth |
StatelessGroupCoordinator.send_object |
3 | 2 | 0 |
meth |
StatelessGroupCoordinator.recv_object |
2 | 1 | 0 |
meth |
StatelessGroupCoordinator.barrier |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.unique_name |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.rank |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.local_rank |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.backend |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.cpu_group |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.device_group |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.tcp_store_group |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.use_device_communicator |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.device_communicator |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.mq_broadcaster |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.use_custom_op_call |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.use_cpu_custom_send_recv |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.device |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.ranks |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.world_size |
1 | 0 | 0 |
attr |
StatelessGroupCoordinator.rank_in_group |
1 | 0 | 0 |
vllm.distributed.utils (14 missing, 4 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
sched_yield |
1 | 0 | 0 |
meth |
StatelessProcessGroup.post_init |
1 | 0 | 0 |
meth |
StatelessProcessGroup.send_obj |
3 | 2 | 1 |
meth |
StatelessProcessGroup.expire_data |
1 | 0 | 0 |
meth |
StatelessProcessGroup.recv_obj |
2 | 2 | 1 |
meth |
StatelessProcessGroup.broadcast_obj |
3 | 3 | 1 |
meth |
StatelessProcessGroup.all_gather_obj |
2 | 2 | 1 |
meth |
StatelessProcessGroup.send |
3 | 2 | 0 |
meth |
StatelessProcessGroup.all_reduce |
3 | 2 | 0 |
meth |
StatelessProcessGroup.barrier |
2 | 1 | 0 |
func |
ensure_divisibility |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
divide |
3 | 0 | 0 |
vllm.distributed.weight_transfer.base (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
WeightTransferEngine.config |
1 | 0 | 0 |
attr |
WeightTransferEngine.parallel_config |
1 | 0 | 0 |
vllm.distributed.weight_transfer.factory (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.distributed.weight_transfer.ipc_engine (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IPCTrainerSendWeightsArgs.post_init |
1 | 0 | 0 |
meth |
IPCWeightTransferUpdateInfo.post_init |
1 | 0 | 0 |
vllm.distributed.weight_transfer.nccl_engine (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NCCLWeightTransferEngine._stateless_init_process_group |
6 | 0 | 0 |
meth |
NCCLWeightTransferUpdateInfo.post_init |
1 | 0 | 0 |
vllm.distributed.weight_transfer.packed_tensor (0 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
packed_broadcast_consumer |
7 | 7 | 1 |
func |
packed_broadcast_producer |
7 | 7 | 1 |
vllm.engine.arg_utils (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EngineArgs.post_init |
1 | 0 | 0 |
meth |
EngineArgs.from_cli_args |
2 | 1 | 0 |
meth |
EngineArgs.validate_tensorizer_args |
1 | 0 | 0 |
meth |
EngineArgs._check_feature_supported |
1 | 0 | 0 |
meth |
EngineArgs._set_default_max_num_seqs_and_batched_tokens_args |
3 | 2 | 0 |
attr |
NEEDS_HELP |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.engine.protocol (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EngineClient.collective_rpc |
5 | 4 | 0 |
vllm.entrypoints.anthropic.api_router (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
attach_router |
2 | 1 | 0 |
func |
create_messages |
3 | 2 | 0 |
func |
count_tokens |
3 | 2 | 0 |
attr |
router |
1 | 0 | 0 |
vllm.entrypoints.anthropic.protocol (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AnthropicTool.validate_input_schema |
2 | 0 | 0 |
meth |
AnthropicMessagesRequest.validate_model |
2 | 0 | 0 |
meth |
AnthropicMessagesRequest.validate_max_tokens |
2 | 0 | 0 |
meth |
AnthropicMessagesResponse.model_post_init |
2 | 0 | 0 |
meth |
AnthropicCountTokensRequest.validate_model |
2 | 0 | 0 |
vllm.entrypoints.anthropic.serving (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
wrap_data_with_event |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
AnthropicServingMessages.init |
13 | 12 | 0 |
meth |
AnthropicServingMessages._convert_message_content |
4 | 3 | 0 |
meth |
AnthropicServingMessages._convert_block |
7 | 6 | 0 |
meth |
AnthropicServingMessages._convert_tool_use_block |
3 | 2 | 0 |
meth |
AnthropicServingMessages._convert_tool_result_block |
5 | 4 | 0 |
meth |
AnthropicServingMessages._convert_user_tool_result |
3 | 2 | 0 |
attr |
AnthropicServingMessages.stop_reason_map |
1 | 0 | 0 |
vllm.entrypoints.api_server (4 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
app |
1 | 0 | 0 |
attr |
parser |
1 | 0 | 0 |
attr |
args |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
run_server |
4 | 4 | 1 |
vllm.entrypoints.chat_utils (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseMultiModalItemTracker.init |
2 | 1 | 0 |
prop |
BaseMultiModalItemTracker.allowed_local_media_path |
1 | 0 | 0 |
prop |
BaseMultiModalItemTracker.allowed_media_domains |
1 | 0 | 0 |
prop |
BaseMultiModalItemTracker.mm_registry |
1 | 0 | 0 |
prop |
BaseMultiModalItemTracker.mm_processor |
1 | 0 | 0 |
attr |
PILImage.model_config |
1 | 0 | 0 |
meth |
AsyncMultiModalContentParser._image_with_uuid_async |
3 | 2 | 0 |
meth |
AsyncMultiModalContentParser._audio_with_uuid_async |
3 | 2 | 0 |
meth |
AsyncMultiModalContentParser._video_with_uuid_async |
3 | 2 | 0 |
func |
validate_chat_template |
2 | 1 | 0 |
attr |
torch |
1 | 0 | 0 |
func |
make_tool_call_id |
4 | 1 | 0 |
meth |
BaseMultiModalContentParser._add_placeholder |
3 | 2 | 0 |
func |
get_history_tool_calls_cnt |
2 | 1 | 0 |
attr |
transformers |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.cli.main (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
main |
1 | 0 | 0 |
vllm.entrypoints.cli.run_batch (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.cli.serve (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
run_multi_api_server |
2 | 1 | 0 |
func |
run_headless |
2 | 1 | 0 |
func |
run_api_server_worker_proc |
6 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.grpc_server (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
VllmEngineServicer.init |
3 | 2 | 0 |
attr |
VllmEngineServicer.async_llm |
1 | 0 | 0 |
attr |
VllmEngineServicer.start_time |
1 | 0 | 0 |
func |
main |
1 | 0 | 0 |
func |
serve_grpc |
2 | 1 | 0 |
vllm.entrypoints.launcher (4 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
serve_http |
5 | 4 | 1 |
attr |
logger |
1 | 0 | 0 |
func |
watchdog_loop |
3 | 2 | 0 |
func |
terminate_if_errored |
3 | 2 | 0 |
vllm.entrypoints.llm (17 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LLM.init |
38 | 38 | 1 |
meth |
LLM.sleep |
3 | 2 | 0 |
meth |
LLM.wake_up |
2 | 1 | 0 |
meth |
LLM._run_completion |
8 | 7 | 0 |
meth |
LLM._run_chat |
14 | 13 | 0 |
meth |
LLM._render_and_run_requests |
7 | 6 | 0 |
attr |
LLM.llm_engine |
1 | 0 | 0 |
attr |
LLM.engine_class |
1 | 0 | 0 |
attr |
LLM.request_counter |
1 | 0 | 0 |
attr |
LLM.supported_tasks |
1 | 0 | 0 |
attr |
LLM.model_config |
1 | 0 | 0 |
attr |
LLM.renderer |
1 | 0 | 0 |
attr |
LLM.chat_template |
1 | 0 | 0 |
attr |
LLM.io_processor |
1 | 0 | 0 |
attr |
LLM.input_processor |
1 | 0 | 0 |
attr |
LLM.chat_template_config |
1 | 0 | 0 |
attr |
LLM.init_pooling_io_processors |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.logger (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
attr |
RequestLogger.max_log_len |
1 | 0 | 0 |
vllm.entrypoints.mcp.tool (9 missing, 8 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HarmonyPythonTool.init |
1 | 0 | 0 |
meth |
HarmonyPythonTool.validate |
1 | 0 | 0 |
meth |
HarmonyPythonTool.get_result |
2 | 2 | 1 |
meth |
HarmonyPythonTool.get_result_parsable_context |
2 | 2 | 1 |
prop |
HarmonyPythonTool.tool_config |
1 | 1 | 1 |
attr |
HarmonyPythonTool.enabled |
1 | 0 | 0 |
attr |
HarmonyPythonTool.python_tool |
1 | 0 | 0 |
meth |
HarmonyBrowserTool.init |
1 | 0 | 0 |
meth |
HarmonyBrowserTool.get_result |
2 | 2 | 1 |
meth |
HarmonyBrowserTool.get_result_parsable_context |
2 | 2 | 1 |
prop |
HarmonyBrowserTool.tool_config |
1 | 1 | 1 |
attr |
HarmonyBrowserTool.enabled |
1 | 0 | 0 |
attr |
HarmonyBrowserTool.browser_tool |
1 | 0 | 0 |
func |
validate_gpt_oss_install |
1 | 0 | 0 |
meth |
Tool.get_result |
2 | 2 | 1 |
meth |
Tool.get_result_parsable_context |
2 | 2 | 1 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.mcp.tool_server (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
list_server_and_tools |
2 | 1 | 0 |
meth |
DemoToolServer.init |
1 | 0 | 0 |
meth |
DemoToolServer.init_and_validate |
1 | 0 | 0 |
meth |
DemoToolServer.new_session |
4 | 3 | 0 |
meth |
MCPToolServer.init |
1 | 0 | 0 |
meth |
MCPToolServer.add_tool_server |
2 | 1 | 0 |
meth |
MCPToolServer.has_tool |
2 | 1 | 0 |
meth |
MCPToolServer.new_session |
4 | 3 | 0 |
attr |
MCPToolServer.harmony_tool_descriptions |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.api_server (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
parser |
1 | 0 | 0 |
func |
validate_api_server_args |
2 | 0 | 0 |
func |
setup_server |
2 | 0 | 0 |
attr |
args |
1 | 0 | 0 |
func |
run_server_worker |
6 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
run_server |
3 | 1 | 0 |
vllm.entrypoints.openai.chat_completion.api_router (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
render_chat_completion |
3 | 2 | 0 |
func |
create_chat_completion |
3 | 2 | 0 |
func |
attach_router |
2 | 1 | 0 |
attr |
router |
1 | 0 | 0 |
vllm.entrypoints.openai.chat_completion.protocol (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
ChatCompletionRequest.validate_response_format |
2 | 0 | 0 |
meth |
ChatCompletionRequest.validate_stream_options |
2 | 0 | 0 |
meth |
ChatCompletionRequest.check_logprobs |
2 | 0 | 0 |
meth |
ChatCompletionRequest.check_structured_outputs_count |
2 | 0 | 0 |
meth |
ChatCompletionRequest.check_tool_usage |
2 | 0 | 0 |
meth |
ChatCompletionRequest.check_generation_prompt |
2 | 0 | 0 |
meth |
ChatCompletionRequest.check_cache_salt_support |
2 | 0 | 0 |
meth |
ChatCompletionRequest.check_system_message_content_type |
2 | 0 | 0 |
vllm.entrypoints.openai.chat_completion.serving (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
OpenAIServingChat._bracket_level |
4 | 2 | 0 |
meth |
OpenAIServingChat._should_stream_with_auto_tool_parsing |
2 | 1 | 0 |
meth |
OpenAIServingChat._make_request_with_harmony |
3 | 2 | 0 |
attr |
OpenAIServingChat.response_role |
1 | 0 | 0 |
attr |
OpenAIServingChat.chat_template |
1 | 0 | 0 |
attr |
OpenAIServingChat.trust_request_chat_template |
1 | 0 | 0 |
attr |
OpenAIServingChat.default_chat_template_kwargs |
1 | 0 | 0 |
attr |
OpenAIServingChat.enable_log_outputs |
1 | 0 | 0 |
attr |
OpenAIServingChat.enable_log_deltas |
1 | 0 | 0 |
attr |
OpenAIServingChat.reasoning_parser_cls |
1 | 0 | 0 |
attr |
OpenAIServingChat.tool_parser |
1 | 0 | 0 |
attr |
OpenAIServingChat.exclude_tools_when_tool_choice_none |
1 | 0 | 0 |
attr |
OpenAIServingChat.enable_prompt_tokens_details |
1 | 0 | 0 |
attr |
OpenAIServingChat.enable_force_include_usage |
1 | 0 | 0 |
attr |
OpenAIServingChat.default_sampling_params |
1 | 0 | 0 |
attr |
OpenAIServingChat.override_max_tokens |
1 | 0 | 0 |
attr |
OpenAIServingChat.use_harmony |
1 | 0 | 0 |
attr |
OpenAIServingChat.supports_browsing |
1 | 0 | 0 |
attr |
OpenAIServingChat.browser_tool |
1 | 0 | 0 |
attr |
OpenAIServingChat.supports_code_interpreter |
1 | 0 | 0 |
attr |
OpenAIServingChat.python_tool |
1 | 0 | 0 |
attr |
OpenAIServingChat.tool_call_id_type |
1 | 0 | 0 |
vllm.entrypoints.openai.cli_args (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LoRAParserAction.call |
5 | 4 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
validate_parsed_serve_args |
2 | 1 | 0 |
vllm.entrypoints.openai.completion.api_router (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
render_completion |
3 | 2 | 0 |
func |
attach_router |
2 | 1 | 0 |
func |
create_completion |
3 | 2 | 0 |
attr |
router |
1 | 0 | 0 |
vllm.entrypoints.openai.completion.protocol (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
CompletionRequest.validate_response_format |
2 | 0 | 0 |
meth |
CompletionRequest.check_structured_outputs_count |
2 | 0 | 0 |
meth |
CompletionRequest.check_logprobs |
2 | 0 | 0 |
meth |
CompletionRequest.validate_stream_options |
2 | 0 | 0 |
meth |
CompletionRequest.validate_prompt_and_prompt_embeds |
2 | 0 | 0 |
meth |
CompletionRequest.check_cache_salt_support |
2 | 0 | 0 |
vllm.entrypoints.openai.completion.serving (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OpenAIServingCompletion.init |
8 | 7 | 0 |
attr |
OpenAIServingCompletion.enable_prompt_tokens_details |
1 | 0 | 0 |
attr |
OpenAIServingCompletion.enable_force_include_usage |
1 | 0 | 0 |
attr |
OpenAIServingCompletion.default_sampling_params |
1 | 0 | 0 |
attr |
OpenAIServingCompletion.override_max_tokens |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.engine.protocol (6 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
LogitsProcessorConstructor.model_config |
1 | 0 | 0 |
attr |
StructuralTagResponseFormat.format |
1 | 1 | 1 |
meth |
OpenAIBaseModel.log_extra_fields |
3 | 0 | 0 |
attr |
OpenAIBaseModel.model_config |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.engine.serving (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GenerationError.init |
2 | 1 | 0 |
attr |
GenerationError.status_code |
1 | 0 | 0 |
attr |
ServeContext.model_config |
1 | 0 | 0 |
meth |
OpenAIServing.init |
6 | 5 | 0 |
meth |
OpenAIServing._extract_prompt_components |
2 | 1 | 0 |
meth |
OpenAIServing._extract_prompt_text |
2 | 1 | 0 |
meth |
OpenAIServing._extract_prompt_len |
2 | 1 | 0 |
meth |
OpenAIServing._render_next_turn |
7 | 6 | 0 |
meth |
OpenAIServing._generate_with_builtin_tools |
8 | 7 | 0 |
attr |
OpenAIServing.engine_client |
1 | 0 | 0 |
attr |
OpenAIServing.models |
1 | 0 | 0 |
attr |
OpenAIServing.request_logger |
1 | 0 | 0 |
attr |
OpenAIServing.return_tokens_as_token_ids |
1 | 0 | 0 |
attr |
OpenAIServing.log_error_stack |
1 | 0 | 0 |
attr |
OpenAIServing.model_config |
1 | 0 | 0 |
attr |
OpenAIServing.renderer |
1 | 0 | 0 |
attr |
OpenAIServing.io_processor |
1 | 0 | 0 |
attr |
OpenAIServing.input_processor |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.generate.api_router (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
register_generate_api_routers |
2 | 1 | 0 |
func |
init_generate_state |
6 | 5 | 0 |
vllm.entrypoints.openai.models.api_router (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
router |
1 | 0 | 0 |
func |
attach_router |
2 | 1 | 0 |
func |
show_available_models |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.models.serving (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
OpenAIServingModels.init |
4 | 3 | 0 |
meth |
OpenAIServingModels.init_static_loras |
1 | 0 | 0 |
meth |
OpenAIServingModels.is_base_model |
2 | 1 | 0 |
attr |
OpenAIServingModels.engine_client |
1 | 0 | 0 |
attr |
OpenAIServingModels.base_model_paths |
1 | 0 | 0 |
attr |
OpenAIServingModels.static_lora_modules |
1 | 0 | 0 |
attr |
OpenAIServingModels.lora_id_counter |
1 | 0 | 0 |
attr |
OpenAIServingModels.model_config |
1 | 0 | 0 |
attr |
OpenAIServingModels.renderer |
1 | 0 | 0 |
attr |
OpenAIServingModels.io_processor |
1 | 0 | 0 |
attr |
OpenAIServingModels.input_processor |
1 | 0 | 0 |
vllm.entrypoints.openai.orca_metrics (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.parser.harmony_utils (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_encoding |
1 | 0 | 0 |
func |
create_tool_definition |
2 | 1 | 0 |
func |
parse_chat_input_to_harmony_message |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.parser.responses_parser (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ResponsesParser.init |
6 | 5 | 0 |
attr |
ResponsesParser.num_init_messages |
1 | 0 | 0 |
attr |
ResponsesParser.tokenizer |
1 | 0 | 0 |
attr |
ResponsesParser.request |
1 | 0 | 0 |
attr |
ResponsesParser.reasoning_parser_instance |
1 | 0 | 0 |
attr |
ResponsesParser.tool_parser_instance |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
get_responses_parser_for_simple_context |
6 | 5 | 0 |
vllm.entrypoints.openai.realtime.api_router (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
realtime_endpoint |
2 | 1 | 0 |
func |
attach_router |
2 | 1 | 0 |
func |
init_realtime_state |
6 | 5 | 0 |
attr |
router |
1 | 0 | 0 |
vllm.entrypoints.openai.realtime.connection (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RealtimeConnection.init |
3 | 2 | 0 |
meth |
RealtimeConnection.handle_connection |
1 | 0 | 0 |
meth |
RealtimeConnection.handle_event |
2 | 1 | 0 |
meth |
RealtimeConnection.start_generation |
1 | 0 | 0 |
meth |
RealtimeConnection._run_generation |
3 | 2 | 0 |
meth |
RealtimeConnection.send |
2 | 1 | 0 |
meth |
RealtimeConnection.send_error |
3 | 2 | 0 |
meth |
RealtimeConnection.cleanup |
1 | 0 | 0 |
attr |
RealtimeConnection.websocket |
1 | 0 | 0 |
attr |
RealtimeConnection.connection_id |
1 | 0 | 0 |
attr |
RealtimeConnection.serving |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.realtime.serving (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
OpenAIServingRealtime.init |
5 | 4 | 0 |
vllm.entrypoints.openai.responses.api_router (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
attach_router |
2 | 1 | 0 |
func |
retrieve_responses |
5 | 4 | 0 |
func |
cancel_responses |
3 | 2 | 0 |
attr |
router |
1 | 0 | 0 |
func |
create_responses |
3 | 2 | 0 |
vllm.entrypoints.openai.responses.context (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
StreamingHarmonyContext.init |
3 | 0 | 0 |
attr |
StreamingHarmonyContext.last_output |
1 | 0 | 0 |
attr |
StreamingHarmonyContext.parser |
1 | 0 | 0 |
attr |
StreamingHarmonyContext.encoding |
1 | 0 | 0 |
attr |
StreamingHarmonyContext.last_tok |
1 | 0 | 0 |
attr |
StreamingHarmonyContext.first_tok_of_message |
1 | 0 | 0 |
attr |
StreamingHarmonyContext.last_content_delta |
1 | 0 | 0 |
meth |
ParsableContext.init |
9 | 8 | 0 |
meth |
ParsableContext.render_for_completion |
1 | 0 | 0 |
meth |
ParsableContext.init_tool_sessions |
5 | 4 | 0 |
meth |
ParsableContext.cleanup_session |
3 | 1 | 0 |
attr |
ParsableContext.num_prompt_tokens |
1 | 0 | 0 |
attr |
ParsableContext.num_output_tokens |
1 | 0 | 0 |
attr |
ParsableContext.num_cached_tokens |
1 | 0 | 0 |
attr |
ParsableContext.num_reasoning_tokens |
1 | 0 | 0 |
attr |
ParsableContext.parser |
1 | 0 | 0 |
attr |
ParsableContext.tool_parser_cls |
1 | 0 | 0 |
attr |
ParsableContext.request |
1 | 0 | 0 |
attr |
ParsableContext.available_tools |
1 | 0 | 0 |
attr |
ParsableContext.tool_dicts |
1 | 0 | 0 |
attr |
ParsableContext.chat_template |
1 | 0 | 0 |
meth |
SimpleContext.init |
1 | 0 | 0 |
meth |
SimpleContext.append_output |
2 | 1 | 0 |
meth |
SimpleContext.append_tool_output |
2 | 1 | 0 |
attr |
SimpleContext.last_output |
1 | 0 | 0 |
attr |
SimpleContext.num_prompt_tokens |
1 | 0 | 0 |
attr |
SimpleContext.num_output_tokens |
1 | 0 | 0 |
attr |
SimpleContext.num_cached_tokens |
1 | 0 | 0 |
attr |
SimpleContext.num_reasoning_tokens |
1 | 0 | 0 |
attr |
SimpleContext.all_turn_metrics |
1 | 0 | 0 |
meth |
ConversationContext.append_tool_output |
2 | 1 | 0 |
meth |
HarmonyContext.init |
3 | 2 | 0 |
meth |
HarmonyContext._update_num_reasoning_tokens |
1 | 0 | 0 |
meth |
HarmonyContext.init_tool_sessions |
5 | 4 | 0 |
meth |
HarmonyContext.cleanup_session |
3 | 1 | 0 |
attr |
HarmonyContext.available_tools |
1 | 0 | 0 |
attr |
HarmonyContext.parser |
1 | 0 | 0 |
attr |
HarmonyContext.num_init_messages |
1 | 0 | 0 |
attr |
HarmonyContext.num_prompt_tokens |
1 | 0 | 0 |
attr |
HarmonyContext.num_output_tokens |
1 | 0 | 0 |
attr |
HarmonyContext.num_cached_tokens |
1 | 0 | 0 |
attr |
HarmonyContext.num_reasoning_tokens |
1 | 0 | 0 |
attr |
HarmonyContext.num_tool_output_tokens |
1 | 0 | 0 |
attr |
HarmonyContext.current_turn_metrics |
1 | 0 | 0 |
attr |
HarmonyContext.is_first_turn |
1 | 0 | 0 |
attr |
HarmonyContext.first_tok_of_message |
1 | 0 | 0 |
attr |
TurnMetrics.input_tokens |
1 | 0 | 0 |
attr |
TurnMetrics.output_tokens |
1 | 0 | 0 |
attr |
TurnMetrics.cached_input_tokens |
1 | 0 | 0 |
attr |
TurnMetrics.tool_output_tokens |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.responses.harmony (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
response_previous_input_to_harmony |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.responses.protocol (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
serialize_messages |
2 | 0 | 0 |
meth |
ResponsesResponse.serialize_output_messages |
3 | 0 | 0 |
meth |
ResponsesResponse.serialize_input_messages |
3 | 0 | 0 |
meth |
ResponsesRequest.validate_background |
2 | 0 | 0 |
meth |
ResponsesRequest.validate_prompt |
2 | 0 | 0 |
meth |
ResponsesRequest.check_cache_salt_support |
2 | 0 | 0 |
meth |
ResponsesRequest.function_call_parsing |
2 | 0 | 0 |
func |
serialize_message |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.responses.serving (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
OpenAIServingResponses._make_request |
3 | 2 | 0 |
meth |
OpenAIServingResponses._make_request_with_harmony |
3 | 2 | 0 |
meth |
OpenAIServingResponses._initialize_tool_sessions |
4 | 3 | 0 |
meth |
OpenAIServingResponses._run_background_request_stream |
4 | 1 | 0 |
meth |
OpenAIServingResponses._run_background_request |
4 | 1 | 0 |
attr |
OpenAIServingResponses.chat_template |
1 | 0 | 0 |
attr |
OpenAIServingResponses.enable_log_outputs |
1 | 0 | 0 |
attr |
OpenAIServingResponses.parser |
1 | 0 | 0 |
attr |
OpenAIServingResponses.enable_prompt_tokens_details |
1 | 0 | 0 |
attr |
OpenAIServingResponses.enable_force_include_usage |
1 | 0 | 0 |
attr |
OpenAIServingResponses.default_sampling_params |
1 | 0 | 0 |
attr |
OpenAIServingResponses.override_max_tokens |
1 | 0 | 0 |
attr |
OpenAIServingResponses.enable_store |
1 | 0 | 0 |
attr |
OpenAIServingResponses.use_harmony |
1 | 0 | 0 |
attr |
OpenAIServingResponses.enable_auto_tools |
1 | 0 | 0 |
attr |
OpenAIServingResponses.response_store_lock |
1 | 0 | 0 |
attr |
OpenAIServingResponses.tool_server |
1 | 0 | 0 |
attr |
OpenAIServingResponses.tool_call_id_type |
1 | 0 | 0 |
vllm.entrypoints.openai.responses.utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
construct_input_messages |
5 | 4 | 0 |
vllm.entrypoints.openai.run_batch (13 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
parse_args |
1 | 0 | 0 |
attr |
args |
1 | 0 | 0 |
meth |
BatchRequestInput.check_type_for_url |
3 | 2 | 1 |
meth |
BatchTranslationRequest.validate_no_file |
2 | 1 | 1 |
meth |
BatchTranscriptionRequest.validate_no_file |
2 | 1 | 1 |
meth |
BatchProgressTracker.init |
1 | 0 | 0 |
meth |
BatchProgressTracker.submitted |
1 | 0 | 0 |
meth |
BatchProgressTracker.completed |
1 | 0 | 0 |
func |
make_arg_parser |
2 | 1 | 0 |
func |
main |
2 | 1 | 0 |
func |
validate_run_batch_args |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.server_utils (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SSEDecoder.init |
1 | 0 | 0 |
attr |
SSEDecoder.buffer |
1 | 0 | 0 |
attr |
SSEDecoder.content_buffer |
1 | 0 | 0 |
func |
validation_exception_handler |
3 | 2 | 0 |
attr |
AuthenticationMiddleware.app |
1 | 0 | 0 |
attr |
AuthenticationMiddleware.api_tokens |
1 | 0 | 0 |
func |
log_response |
3 | 1 | 0 |
func |
http_exception_handler |
3 | 2 | 0 |
func |
lifespan |
2 | 1 | 0 |
attr |
XRequestIdMiddleware.app |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.speech_to_text.api_router (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
create_translations |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
init_transcription_state |
6 | 5 | 0 |
func |
attach_router |
2 | 1 | 0 |
func |
create_transcriptions |
3 | 2 | 0 |
attr |
router |
1 | 0 | 0 |
vllm.entrypoints.openai.speech_to_text.protocol (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TranscriptionRequest.validate_transcription_request |
2 | 0 | 0 |
meth |
TranslationRequest.validate_stream_options |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.speech_to_text.serving (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OpenAIServingTranslation.init |
7 | 6 | 0 |
meth |
OpenAIServingTranscription.init |
7 | 6 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.openai.speech_to_text.speech_to_text (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OpenAISpeechToText.init |
8 | 7 | 0 |
meth |
OpenAISpeechToText._preprocess_verbose_prompt |
2 | 1 | 0 |
attr |
OpenAISpeechToText.default_sampling_params |
1 | 0 | 0 |
attr |
OpenAISpeechToText.asr_config |
1 | 0 | 0 |
attr |
OpenAISpeechToText.enable_force_include_usage |
1 | 0 | 0 |
attr |
OpenAISpeechToText.max_audio_filesize_mb |
1 | 0 | 0 |
attr |
OpenAISpeechToText.tokenizer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
librosa |
1 | 0 | 0 |
vllm.entrypoints.openai.utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
validate_json_request |
2 | 1 | 0 |
vllm.entrypoints.pooling (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
init_pooling_state |
6 | 5 | 0 |
func |
register_pooling_api_routers |
3 | 2 | 0 |
vllm.entrypoints.pooling.base.io_processor (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PoolingIOProcessor.init |
4 | 3 | 0 |
meth |
PoolingIOProcessor.pre_process_online |
3 | 0 | 0 |
meth |
PoolingIOProcessor.pre_process_online_async |
3 | 0 | 0 |
meth |
PoolingIOProcessor.pre_process_offline |
3 | 0 | 0 |
meth |
PoolingIOProcessor.pre_process_offline_async |
3 | 0 | 0 |
meth |
PoolingIOProcessor.create_pooling_params |
2 | 0 | 0 |
meth |
PoolingIOProcessor._validate_chat_template |
4 | 3 | 0 |
attr |
PoolingIOProcessor.model_config |
1 | 0 | 0 |
attr |
PoolingIOProcessor.renderer |
1 | 0 | 0 |
attr |
PoolingIOProcessor.chat_template |
1 | 0 | 0 |
attr |
PoolingIOProcessor.trust_request_chat_template |
1 | 0 | 0 |
vllm.entrypoints.pooling.base.protocol (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChatRequestMixin.check_generation_prompt |
2 | 0 | 0 |
vllm.entrypoints.pooling.base.serving (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
PoolingServeContext.model_config |
1 | 0 | 0 |
meth |
PoolingServing.init |
9 | 8 | 0 |
meth |
PoolingServing._preprocess |
2 | 1 | 0 |
meth |
PoolingServing._prepare_generators |
2 | 1 | 0 |
meth |
PoolingServing._collect_batch |
2 | 1 | 0 |
meth |
PoolingServing._maybe_get_adapters |
3 | 2 | 0 |
attr |
PoolingServing.engine_client |
1 | 0 | 0 |
attr |
PoolingServing.models |
1 | 0 | 0 |
attr |
PoolingServing.model_config |
1 | 0 | 0 |
attr |
PoolingServing.max_model_len |
1 | 0 | 0 |
attr |
PoolingServing.request_logger |
1 | 0 | 0 |
attr |
PoolingServing.return_tokens_as_token_ids |
1 | 0 | 0 |
attr |
PoolingServing.log_error_stack |
1 | 0 | 0 |
attr |
PoolingServing.chat_template_config |
1 | 0 | 0 |
attr |
PoolingServing.io_processor |
1 | 0 | 0 |
vllm.entrypoints.pooling.classify.api_router (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
router |
1 | 0 | 0 |
vllm.entrypoints.pooling.classify.protocol (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ClassificationChatRequest.to_pooling_params |
1 | 0 | 0 |
meth |
ClassificationCompletionRequest.to_pooling_params |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.pooling.classify.serving (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.pooling.embed.api_router (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
router |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
create_embedding |
3 | 2 | 0 |
vllm.entrypoints.pooling.embed.protocol (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EmbeddingCompletionRequest.to_pooling_params |
1 | 0 | 0 |
meth |
EmbeddingChatRequest.to_pooling_params |
1 | 0 | 0 |
vllm.entrypoints.pooling.embed.serving (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
OpenAIServingEmbedding._should_use_chunked_processing |
2 | 1 | 0 |
attr |
OpenAIServingEmbedding.chat_template |
1 | 0 | 0 |
attr |
OpenAIServingEmbedding.trust_request_chat_template |
1 | 0 | 0 |
attr |
OpenAIServingEmbedding.pooler_config |
1 | 0 | 0 |
vllm.entrypoints.pooling.pooling.api_router (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
create_pooling |
3 | 2 | 0 |
attr |
router |
1 | 0 | 0 |
vllm.entrypoints.pooling.pooling.protocol (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PoolingChatRequest.to_pooling_params |
1 | 0 | 0 |
meth |
PoolingCompletionRequest.to_pooling_params |
1 | 0 | 0 |
vllm.entrypoints.pooling.pooling.serving (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
attr |
OpenAIServingPooling.chat_template |
1 | 0 | 0 |
attr |
OpenAIServingPooling.trust_request_chat_template |
1 | 0 | 0 |
vllm.entrypoints.pooling.score.api_router (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
do_rerank |
3 | 2 | 0 |
func |
create_score_v1 |
3 | 2 | 0 |
attr |
router |
1 | 0 | 0 |
func |
create_score |
3 | 2 | 0 |
func |
do_rerank_v1 |
3 | 2 | 0 |
func |
do_rerank_v2 |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.pooling.score.protocol (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RerankRequest.to_pooling_params |
2 | 1 | 0 |
prop |
ScoreTextRequest.data_1 |
1 | 0 | 0 |
prop |
ScoreTextRequest.data_2 |
1 | 0 | 0 |
prop |
ScoreQueriesItemsRequest.data_1 |
1 | 0 | 0 |
prop |
ScoreQueriesItemsRequest.data_2 |
1 | 0 | 0 |
meth |
ScoreRequestMixin.to_pooling_params |
2 | 1 | 0 |
prop |
ScoreQueriesDocumentsRequest.data_1 |
1 | 0 | 0 |
prop |
ScoreQueriesDocumentsRequest.data_2 |
1 | 0 | 0 |
vllm.entrypoints.pooling.score.serving (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
attr |
ServingScores.score_template |
1 | 0 | 0 |
attr |
ServingScores.use_gpu_for_pooling_score |
1 | 0 | 0 |
attr |
ServingScores.is_cross_encoder |
1 | 0 | 0 |
attr |
ServingScores.is_multimodal_model |
1 | 0 | 0 |
attr |
ServingScores.architecture |
1 | 0 | 0 |
attr |
ServingScores.is_late_interaction |
1 | 0 | 0 |
vllm.entrypoints.sagemaker.api_router (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_invocation_types |
2 | 1 | 0 |
func |
attach_router |
3 | 2 | 0 |
vllm.entrypoints.serve (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
register_vllm_serve_api_routers |
2 | 1 | 0 |
vllm.entrypoints.serve.cache.api_router (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
reset_mm_cache |
2 | 1 | 0 |
attr |
router |
1 | 0 | 0 |
func |
reset_prefix_cache |
4 | 3 | 0 |
func |
reset_encoder_cache |
2 | 1 | 0 |
func |
attach_router |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.serve.disagg.api_router (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
router |
1 | 0 | 0 |
func |
attach_router |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
generate |
3 | 2 | 0 |
vllm.entrypoints.serve.disagg.serving (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ServingTokens.init |
9 | 8 | 0 |
attr |
ServingTokens.enable_prompt_tokens_details |
1 | 0 | 0 |
attr |
ServingTokens.enable_log_outputs |
1 | 0 | 0 |
attr |
ServingTokens.force_no_detokenize |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.serve.elastic_ep.api_router (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
router |
1 | 0 | 0 |
func |
scale_elastic_ep |
2 | 1 | 0 |
func |
is_scaling_elastic_ep |
2 | 1 | 0 |
func |
attach_router |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.serve.elastic_ep.middleware (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
ScalingMiddleware.app |
1 | 0 | 0 |
func |
get_scaling_elastic_ep |
1 | 0 | 0 |
func |
set_scaling_elastic_ep |
2 | 0 | 0 |
vllm.entrypoints.serve.instrumentator (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
register_instrumentator_api_routers |
2 | 1 | 0 |
vllm.entrypoints.serve.instrumentator.basic (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
get_server_load_metrics |
2 | 1 | 0 |
func |
show_version |
1 | 0 | 0 |
attr |
router |
1 | 0 | 0 |
vllm.entrypoints.serve.instrumentator.health (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
attr |
router |
1 | 0 | 0 |
vllm.entrypoints.serve.instrumentator.metrics (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
attach_router |
2 | 1 | 0 |
vllm.entrypoints.serve.instrumentator.offline_docs (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.serve.instrumentator.server_info (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
show_server_info |
3 | 2 | 0 |
attr |
router |
1 | 0 | 0 |
attr |
PydanticVllmConfig |
1 | 0 | 0 |
vllm.entrypoints.serve.lora.api_router (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
attr |
router |
1 | 0 | 0 |
func |
attach_router |
2 | 1 | 0 |
vllm.entrypoints.serve.profile.api_router (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
start_profile |
2 | 1 | 0 |
attr |
router |
1 | 0 | 0 |
func |
attach_router |
2 | 1 | 0 |
func |
stop_profile |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.serve.rlhf.api_router (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
init_weight_transfer_engine |
2 | 1 | 0 |
func |
update_weights |
2 | 1 | 0 |
func |
attach_router |
2 | 1 | 0 |
func |
get_world_size |
3 | 2 | 0 |
attr |
router |
1 | 0 | 0 |
vllm.entrypoints.serve.rpc.api_router (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
router |
1 | 0 | 0 |
func |
attach_router |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
collective_rpc |
2 | 1 | 0 |
vllm.entrypoints.serve.sleep.api_router (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
wake_up |
2 | 1 | 0 |
func |
is_sleeping |
2 | 1 | 0 |
func |
attach_router |
2 | 1 | 0 |
attr |
router |
1 | 0 | 0 |
func |
sleep |
2 | 1 | 0 |
vllm.entrypoints.serve.tokenize.api_router (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
tokenize |
3 | 2 | 0 |
attr |
router |
1 | 0 | 0 |
func |
detokenize |
3 | 2 | 0 |
func |
attach_router |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.serve.tokenize.protocol (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TokenizeChatRequest.check_generation_prompt |
2 | 0 | 0 |
attr |
TokenizerInfoResponse.model_config |
1 | 0 | 0 |
vllm.entrypoints.serve.tokenize.serving (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
OpenAIServingTokenization.chat_template |
1 | 0 | 0 |
attr |
OpenAIServingTokenization.trust_request_chat_template |
1 | 0 | 0 |
meth |
TokenizerInfo._make_json_serializable |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.ssl (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SSLCertRefresher._watch_files |
3 | 2 | 0 |
attr |
SSLCertRefresher.ssl |
1 | 0 | 0 |
attr |
SSLCertRefresher.key_path |
1 | 0 | 0 |
attr |
SSLCertRefresher.cert_path |
1 | 0 | 0 |
attr |
SSLCertRefresher.ca_path |
1 | 0 | 0 |
attr |
SSLCertRefresher.watch_ssl_cert_task |
1 | 0 | 0 |
attr |
SSLCertRefresher.watch_ssl_ca_task |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.entrypoints.utils (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
log_non_default_args |
2 | 1 | 0 |
func |
decrement_server_load |
2 | 1 | 0 |
func |
with_cancellation |
2 | 0 | 0 |
func |
cli_env_setup |
1 | 0 | 0 |
func |
load_aware_call |
2 | 0 | 0 |
vllm.env_override (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
should_partition_patched |
4 | 2 | 0 |
func |
memory_plan_reuse_patched |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
get_graph_partition_signature_patched |
4 | 1 | 0 |
vllm.envs (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_default_cache_root |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
get_default_config_root |
1 | 0 | 0 |
func |
is_set |
2 | 1 | 0 |
vllm.exceptions (3 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VLLMValidationError.init |
4 | 4 | 1 |
meth |
VLLMValidationError.str |
1 | 0 | 0 |
attr |
VLLMValidationError.parameter |
1 | 0 | 0 |
attr |
VLLMValidationError.value |
1 | 0 | 0 |
vllm.forward_context (7 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
create_forward_context |
11 | 10 | 1 |
func |
override_forward_context |
2 | 1 | 0 |
func |
set_forward_context |
11 | 10 | 1 |
meth |
DPMetadata.chunked_sizes |
4 | 3 | 0 |
meth |
DPMetadata.sp_local_sizes |
2 | 1 | 0 |
meth |
ForwardContext.post_init |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.grpc.compile_protos (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
compile_protos |
1 | 0 | 0 |
vllm.grpc.vllm_engine_pb2_grpc (95 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VllmEngineServicer.Generate |
3 | 0 | 0 |
meth |
VllmEngineServicer.Embed |
3 | 0 | 0 |
meth |
VllmEngineServicer.HealthCheck |
3 | 0 | 0 |
meth |
VllmEngineServicer.Abort |
3 | 0 | 0 |
meth |
VllmEngineServicer.GetModelInfo |
3 | 0 | 0 |
meth |
VllmEngineServicer.GetServerInfo |
3 | 0 | 0 |
meth |
VllmEngineStub.init |
2 | 0 | 0 |
attr |
VllmEngineStub.Generate |
1 | 0 | 0 |
attr |
VllmEngineStub.Embed |
1 | 0 | 0 |
attr |
VllmEngineStub.HealthCheck |
1 | 0 | 0 |
attr |
VllmEngineStub.Abort |
1 | 0 | 0 |
attr |
VllmEngineStub.GetModelInfo |
1 | 0 | 0 |
attr |
VllmEngineStub.GetServerInfo |
1 | 0 | 0 |
meth |
VllmEngine.Generate |
11 | 0 | 0 |
meth |
VllmEngine.Embed |
11 | 0 | 0 |
meth |
VllmEngine.HealthCheck |
11 | 0 | 0 |
meth |
VllmEngine.Abort |
11 | 0 | 0 |
meth |
VllmEngine.GetModelInfo |
11 | 0 | 0 |
meth |
VllmEngine.GetServerInfo |
11 | 0 | 0 |
func |
add_VllmEngineServicer_to_server |
3 | 0 | 0 |
vllm.inputs.data (0 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
DataPrompt.data |
1 | 1 | 1 |
vllm.inputs.preprocess (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
InputPreprocessor.model_config |
1 | 0 | 0 |
attr |
InputPreprocessor.renderer |
1 | 0 | 0 |
attr |
InputPreprocessor.mm_registry |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.kernels.helion.config_manager (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConfigSet.init |
2 | 1 | 0 |
meth |
ConfigManager.init |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.kernels.helion.ops.silu_mul_fp8 (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.kernels.helion.register (23 missing, 4 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
vllm_helion_lib |
1 | 0 | 0 |
meth |
ConfiguredHelionKernel.init |
5 | 4 | 0 |
meth |
ConfiguredHelionKernel.call |
3 | 0 | 0 |
meth |
ConfiguredHelionKernel._create_key_computer |
1 | 0 | 0 |
meth |
ConfiguredHelionKernel._create_config_selector |
2 | 0 | 0 |
attr |
ConfiguredHelionKernel.op_name |
1 | 0 | 0 |
attr |
ConfiguredHelionKernel.config_picker |
1 | 0 | 0 |
attr |
ConfiguredHelionKernel.raw_kernel_func |
1 | 0 | 0 |
attr |
ConfiguredHelionKernel.helion_settings |
1 | 0 | 0 |
meth |
HelionKernelWrapper.init |
5 | 4 | 0 |
meth |
HelionKernelWrapper.call |
3 | 0 | 0 |
meth |
HelionKernelWrapper._call_via_hop |
3 | 3 | 1 |
meth |
HelionKernelWrapper._partition_args |
4 | 4 | 1 |
meth |
HelionKernelWrapper._get_or_register_custom_op |
1 | 1 | 1 |
attr |
HelionKernelWrapper.raw_kernel_func |
1 | 0 | 0 |
attr |
HelionKernelWrapper.op_name |
1 | 0 | 0 |
attr |
HelionKernelWrapper.helion_settings |
1 | 0 | 0 |
meth |
PresetConfigSearch.init |
3 | 2 | 0 |
attr |
PresetConfigSearch.args |
1 | 0 | 0 |
attr |
PresetConfigSearch.config_selector |
1 | 0 | 0 |
func |
create_helion_decorated_kernel |
4 | 4 | 1 |
attr |
logger |
1 | 0 | 0 |
vllm.kernels.helion.utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.logger (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
enable_trace_function_call |
3 | 2 | 0 |
vllm.logging_utils.access_log_filter (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UvicornAccessLogFilter.init |
2 | 1 | 0 |
attr |
UvicornAccessLogFilter.excluded_paths |
1 | 0 | 0 |
vllm.logging_utils.dump_input (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
dump_engine_exception |
4 | 3 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
prepare_object_to_dump |
2 | 1 | 0 |
vllm.logging_utils.formatter (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColoredFormatter.init |
4 | 0 | 0 |
meth |
ColoredFormatter.format |
2 | 0 | 0 |
meth |
NewLineFormatter.init |
4 | 0 | 0 |
meth |
NewLineFormatter.format |
2 | 0 | 0 |
attr |
NewLineFormatter.use_relpath |
1 | 0 | 0 |
attr |
NewLineFormatter.root_dir |
1 | 0 | 0 |
vllm.logging_utils.log_time (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
logtime |
3 | 0 | 0 |
vllm.logits_process (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NoBadWordsLogitsProcessor.init |
2 | 1 | 0 |
attr |
NoBadWordsLogitsProcessor._SMALLEST_LOGIT |
1 | 0 | 0 |
attr |
NoBadWordsLogitsProcessor.bad_words_ids |
1 | 0 | 0 |
vllm.logprobs (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlatLogprobs.extend |
2 | 1 | 0 |
meth |
FlatLogprobs.setitem |
3 | 1 | 0 |
meth |
FlatLogprobs.delitem |
2 | 1 | 0 |
vllm.lora.layers.base (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseLayerWithLoRA.reset_lora |
2 | 1 | 0 |
meth |
BaseLayerWithLoRA.set_lora |
4 | 3 | 0 |
meth |
BaseLayerWithLoRA.set_mapping |
2 | 0 | 0 |
vllm.lora.layers.base_linear (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseLinearLayerWithLoRA.init |
2 | 1 | 0 |
meth |
BaseLinearLayerWithLoRA.reset_lora |
2 | 1 | 0 |
meth |
BaseLinearLayerWithLoRA.set_lora |
4 | 3 | 0 |
attr |
BaseLinearLayerWithLoRA.base_layer |
1 | 0 | 0 |
attr |
BaseLinearLayerWithLoRA.input_size |
1 | 0 | 0 |
attr |
BaseLinearLayerWithLoRA.tp_size |
1 | 0 | 0 |
attr |
BaseLinearLayerWithLoRA.tp_rank |
1 | 0 | 0 |
attr |
BaseLinearLayerWithLoRA.device |
1 | 0 | 0 |
vllm.lora.layers.column_parallel_linear (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
QKVParallelLinearWithLoRA.q_proj_total_size |
1 | 0 | 0 |
attr |
QKVParallelLinearWithLoRA.q_proj_shard_size |
1 | 0 | 0 |
attr |
QKVParallelLinearWithLoRA.kv_proj_shard_size |
1 | 0 | 0 |
attr |
QKVParallelLinearWithLoRA.kv_proj_total_size |
1 | 0 | 0 |
attr |
QKVParallelLinearWithLoRA.n_slices |
1 | 0 | 0 |
attr |
MergedQKVParallelLinearWithLoRA.n_slices |
1 | 0 | 0 |
attr |
MergedQKVParallelLinearWithLoRA.q_proj_shard_size |
1 | 0 | 0 |
attr |
MergedQKVParallelLinearWithLoRA.kv_proj_shard_size |
1 | 0 | 0 |
attr |
MergedQKVParallelLinearWithLoRA.q_shard_id |
1 | 0 | 0 |
attr |
MergedQKVParallelLinearWithLoRA.kv_shard_id |
1 | 0 | 0 |
attr |
MergedQKVParallelLinearWithLoRA.output_slices |
1 | 0 | 0 |
attr |
MergedQKVParallelLinearWithLoRA.output_ids |
1 | 0 | 0 |
meth |
MergedColumnParallelLinearWithLoRA.set_lora |
4 | 3 | 0 |
attr |
MergedColumnParallelLinearWithLoRA.output_slices |
1 | 0 | 0 |
attr |
MergedColumnParallelLinearWithLoRA.n_slices |
1 | 0 | 0 |
attr |
MergedColumnParallelLinearWithLoRA.output_ids |
1 | 0 | 0 |
attr |
ColumnParallelLinearWithLoRA.is_merged_col_linear |
1 | 0 | 0 |
attr |
ColumnParallelLinearWithLoRA.output_size |
1 | 0 | 0 |
attr |
ColumnParallelLinearWithLoRA.n_slices |
1 | 0 | 0 |
meth |
MergedColumnParallelLinearVariableSliceWithLoRA.set_lora |
4 | 3 | 0 |
vllm.lora.layers.fused_moe (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FusedMoE3DWithLoRA.init |
2 | 0 | 0 |
meth |
FusedMoE3DWithLoRA._create_lora_b_weights |
3 | 0 | 0 |
meth |
FusedMoE3DWithLoRA._slice_w13_b |
2 | 1 | 0 |
meth |
FusedMoE3DWithLoRA.set_lora |
4 | 3 | 0 |
prop |
FusedMoE3DWithLoRA.w13_input_size |
1 | 0 | 0 |
prop |
FusedMoE3DWithLoRA.w13_output_size |
1 | 0 | 0 |
prop |
FusedMoE3DWithLoRA.w2_input_size |
1 | 0 | 0 |
prop |
FusedMoE3DWithLoRA.w2_output_size |
1 | 0 | 0 |
meth |
FusedMoEWithLoRA._get_lora_moe_configs |
9 | 8 | 0 |
meth |
FusedMoEWithLoRA._inject_lora_into_fused_moe |
1 | 0 | 0 |
meth |
FusedMoEWithLoRA._create_lora_a_weights |
3 | 2 | 0 |
meth |
FusedMoEWithLoRA._create_lora_b_weights |
3 | 2 | 0 |
meth |
FusedMoEWithLoRA._slice_w13_b |
2 | 1 | 0 |
meth |
FusedMoEWithLoRA.reset_lora |
2 | 1 | 0 |
meth |
FusedMoEWithLoRA.set_lora |
4 | 3 | 0 |
meth |
FusedMoEWithLoRA.forward |
3 | 0 | 0 |
meth |
FusedMoEWithLoRA.maybe_all_reduce_tensor_model_parallel |
3 | 0 | 0 |
prop |
FusedMoEWithLoRA._shared_experts |
1 | 0 | 0 |
prop |
FusedMoEWithLoRA.quant_method |
1 | 0 | 0 |
attr |
FusedMoEWithLoRA.base_layer |
1 | 0 | 0 |
attr |
FusedMoEWithLoRA.tp_size |
1 | 0 | 0 |
attr |
FusedMoEWithLoRA.tp_rank |
1 | 0 | 0 |
attr |
FusedMoEWithLoRA.device |
1 | 0 | 0 |
vllm.lora.layers.logits_processor (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LogitsProcessorWithLoRA.reset_lora |
2 | 1 | 0 |
meth |
LogitsProcessorWithLoRA.set_lora |
4 | 3 | 0 |
meth |
LogitsProcessorWithLoRA.forward |
3 | 0 | 0 |
prop |
LogitsProcessorWithLoRA.logits_as_input |
1 | 0 | 0 |
prop |
LogitsProcessorWithLoRA.vocab_size |
1 | 0 | 0 |
prop |
LogitsProcessorWithLoRA.scale |
1 | 0 | 0 |
prop |
LogitsProcessorWithLoRA.soft_cap |
1 | 0 | 0 |
prop |
LogitsProcessorWithLoRA.use_all_gather |
1 | 0 | 0 |
prop |
LogitsProcessorWithLoRA.org_vocab_size |
1 | 0 | 0 |
prop |
LogitsProcessorWithLoRA.include_gpu_probs_tensor |
1 | 0 | 0 |
prop |
LogitsProcessorWithLoRA.should_modify_greedy_probs_inplace |
1 | 0 | 0 |
attr |
LogitsProcessorWithLoRA.base_layer |
1 | 0 | 0 |
attr |
LogitsProcessorWithLoRA.hidden_size |
1 | 0 | 0 |
attr |
LogitsProcessorWithLoRA.dtype |
1 | 0 | 0 |
attr |
LogitsProcessorWithLoRA.device |
1 | 0 | 0 |
attr |
LogitsProcessorWithLoRA.tp_size |
1 | 0 | 0 |
attr |
LogitsProcessorWithLoRA.tp_rank |
1 | 0 | 0 |
attr |
LogitsProcessorWithLoRA.sharded_to_full_mapping |
1 | 0 | 0 |
vllm.lora.layers.replicated_linear (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
ReplicatedLinearWithLoRA.output_size |
1 | 0 | 0 |
attr |
ReplicatedLinearWithLoRA.n_slices |
1 | 0 | 0 |
vllm.lora.layers.row_parallel_linear (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
RowParallelLinearWithLoRA.input_size |
1 | 0 | 0 |
attr |
RowParallelLinearWithLoRA.output_size |
1 | 0 | 0 |
attr |
RowParallelLinearWithLoRA.n_slices |
1 | 0 | 0 |
vllm.lora.layers.utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LoRAMapping.post_init |
1 | 0 | 0 |
vllm.lora.layers.vocal_parallel_embedding (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VocabParallelEmbeddingWithLoRA.reset_lora |
2 | 1 | 0 |
meth |
VocabParallelEmbeddingWithLoRA.set_lora |
4 | 3 | 0 |
prop |
VocabParallelEmbeddingWithLoRA.weight |
1 | 0 | 0 |
attr |
VocabParallelEmbeddingWithLoRA.base_layer |
1 | 0 | 0 |
vllm.lora.lora_model (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
LoRAModel.id |
1 | 0 | 0 |
attr |
LoRAModel.rank |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.lora.lora_weights (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
LoRALayerWeights.module_name |
1 | 0 | 0 |
attr |
LoRALayerWeights.rank |
1 | 0 | 0 |
attr |
LoRALayerWeights.lora_alpha |
1 | 0 | 0 |
attr |
LoRALayerWeights.lora_a |
1 | 0 | 0 |
attr |
LoRALayerWeights.lora_b |
1 | 0 | 0 |
attr |
LoRALayerWeights.scaling |
1 | 0 | 0 |
attr |
PackedLoRALayerWeights.lora_alphas |
1 | 0 | 0 |
attr |
PackedLoRALayerWeights.scaling |
1 | 0 | 0 |
vllm.lora.model_manager (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LRUCacheLoRAModelManager.init |
8 | 7 | 0 |
meth |
LRUCacheLoRAModelManager._pin_lora_in_cpu_cache |
2 | 1 | 0 |
meth |
LRUCacheLoRAModelManager._pin_lora_in_gpu_cache |
2 | 1 | 0 |
func |
create_lora_manager |
10 | 9 | 0 |
meth |
AdapterLRUCache.init |
3 | 2 | 0 |
meth |
AdapterLRUCache._on_remove |
3 | 2 | 0 |
attr |
AdapterLRUCache.deactivate_fn |
1 | 0 | 0 |
meth |
LoRALRUCache.init |
3 | 2 | 0 |
meth |
LoRAModelManager.init |
8 | 7 | 0 |
meth |
LoRAModelManager._deactivate_adapter |
2 | 1 | 0 |
meth |
LoRAModelManager._add_adapter |
2 | 1 | 0 |
meth |
LoRAModelManager.remove_all_adapters |
1 | 0 | 0 |
meth |
LoRAModelManager._create_lora_modules |
1 | 0 | 0 |
meth |
LoRAModelManager.register_module |
3 | 2 | 0 |
meth |
LoRAModelManager._match_target_modules |
2 | 1 | 0 |
meth |
LoRAModelManager._stack_moe_lora_weights |
4 | 3 | 0 |
attr |
LoRAModelManager.supported_lora_modules |
1 | 0 | 0 |
attr |
LoRAModelManager.adapter_type |
1 | 0 | 0 |
attr |
LoRAModelManager.lora_config |
1 | 0 | 0 |
attr |
LoRAModelManager.device |
1 | 0 | 0 |
attr |
LoRAModelManager.max_num_seqs |
1 | 0 | 0 |
attr |
LoRAModelManager.max_num_batched_tokens |
1 | 0 | 0 |
attr |
LoRAModelManager.vocab_size |
1 | 0 | 0 |
attr |
LoRAModelManager.packed_modules_mapping |
1 | 0 | 0 |
attr |
LoRAModelManager.is_pooling_model |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.lora.ops.torch_ops.lora_ops (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
sgmv_shrink |
11 | 10 | 0 |
func |
bgmv_expand_slice |
8 | 7 | 0 |
func |
bgmv_shrink |
6 | 5 | 0 |
func |
sgmv_expand |
11 | 10 | 0 |
func |
sgmv_expand_slice |
13 | 12 | 0 |
func |
bgmv_expand |
6 | 5 | 0 |
vllm.lora.ops.triton_ops.kernel_utils (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
do_expand_kernel |
29 | 9 | 0 |
func |
do_shrink_kernel |
28 | 7 | 0 |
func |
mm_k |
16 | 9 | 0 |
vllm.lora.ops.triton_ops.lora_kernel_metadata (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LoRAKernelMeta._reset |
1 | 0 | 0 |
vllm.lora.ops.triton_ops.utils (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
is_batch_invariant |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.lora.ops.xpu_ops.lora_ops (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.lora.peft_helper (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
PEFTHelper.post_init |
1 | 0 | 0 |
vllm.lora.punica_wrapper.punica_base (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PunicaWrapperBase.init |
5 | 3 | 0 |
meth |
PunicaWrapperBase._update_base_metadata |
5 | 4 | 0 |
meth |
PunicaWrapperBase.update_metadata |
6 | 4 | 0 |
meth |
PunicaWrapperBase.add_shrink |
6 | 5 | 0 |
meth |
PunicaWrapperBase.add_expand |
8 | 6 | 0 |
meth |
PunicaWrapperBase.add_lora_embedding |
6 | 5 | 0 |
meth |
PunicaWrapperBase.add_lora_linear |
9 | 8 | 0 |
meth |
PunicaWrapperBase.add_lora_logits |
8 | 6 | 0 |
meth |
PunicaWrapperBase.add_lora_fused_moe |
18 | 14 | 0 |
attr |
PunicaWrapperBase.is_prefill |
1 | 0 | 0 |
attr |
PunicaWrapperBase.no_lora |
1 | 0 | 0 |
meth |
PunicaWrapperABC.update_metadata |
6 | 5 | 0 |
meth |
PunicaWrapperABC.add_shrink |
6 | 5 | 0 |
meth |
PunicaWrapperABC.add_expand |
8 | 6 | 0 |
meth |
PunicaWrapperABC.add_lora_embedding |
6 | 5 | 0 |
meth |
PunicaWrapperABC.add_lora_linear |
9 | 8 | 0 |
meth |
PunicaWrapperABC.add_lora_logits |
8 | 6 | 0 |
vllm.lora.punica_wrapper.punica_cpu (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PunicaWrapperCPU.init |
5 | 3 | 0 |
meth |
PunicaWrapperCPU._shrink_prefill |
5 | 4 | 0 |
meth |
PunicaWrapperCPU._shrink_decode |
5 | 4 | 0 |
meth |
PunicaWrapperCPU._expand_prefill |
5 | 4 | 0 |
meth |
PunicaWrapperCPU._expand_decode |
5 | 4 | 0 |
meth |
PunicaWrapperCPU._expand_slice_prefill |
7 | 6 | 0 |
meth |
PunicaWrapperCPU._expand_slice_decode |
7 | 6 | 0 |
meth |
PunicaWrapperCPU._apply_expand |
7 | 6 | 0 |
meth |
PunicaWrapperCPU._apply_shrink |
5 | 4 | 0 |
meth |
PunicaWrapperCPU.add_shrink |
6 | 4 | 0 |
meth |
PunicaWrapperCPU.add_expand |
8 | 6 | 0 |
meth |
PunicaWrapperCPU.add_lora_embedding |
6 | 5 | 0 |
meth |
PunicaWrapperCPU.add_lora_linear |
9 | 8 | 0 |
meth |
PunicaWrapperCPU.add_lora_logits |
8 | 6 | 0 |
vllm.lora.punica_wrapper.punica_gpu (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PunicaWrapperGPU.init |
5 | 3 | 0 |
meth |
PunicaWrapperGPU.update_metadata |
6 | 4 | 0 |
meth |
PunicaWrapperGPU.add_shrink |
6 | 4 | 0 |
meth |
PunicaWrapperGPU.add_expand |
8 | 6 | 0 |
meth |
PunicaWrapperGPU.add_lora_embedding |
6 | 5 | 0 |
meth |
PunicaWrapperGPU.add_lora_linear |
9 | 8 | 0 |
meth |
PunicaWrapperGPU.add_lora_logits |
8 | 6 | 0 |
meth |
PunicaWrapperGPU.add_lora_fused_moe |
18 | 14 | 0 |
attr |
PunicaWrapperGPU.lora_config |
1 | 0 | 0 |
attr |
PunicaWrapperGPU.max_loras |
1 | 0 | 0 |
attr |
PunicaWrapperGPU.token_mapping_meta |
1 | 0 | 0 |
attr |
PunicaWrapperGPU.prompt_mapping_meta |
1 | 0 | 0 |
vllm.lora.punica_wrapper.punica_selector (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_punica_wrapper |
3 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.lora.punica_wrapper.punica_xpu (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PunicaWrapperXPU.init |
5 | 3 | 0 |
meth |
PunicaWrapperXPU.update_metadata |
6 | 4 | 0 |
meth |
PunicaWrapperXPU._apply_shrink |
5 | 4 | 0 |
meth |
PunicaWrapperXPU._apply_expand |
7 | 6 | 0 |
meth |
PunicaWrapperXPU.add_shrink |
6 | 4 | 0 |
meth |
PunicaWrapperXPU.add_expand |
8 | 6 | 0 |
meth |
PunicaWrapperXPU.add_lora_embedding |
6 | 5 | 0 |
meth |
PunicaWrapperXPU.add_lora_linear |
9 | 8 | 0 |
meth |
PunicaWrapperXPU.add_lora_logits |
8 | 6 | 0 |
meth |
PunicaWrapperXPU.add_lora_fused_moe |
18 | 14 | 0 |
attr |
PunicaWrapperXPU.lora_config |
1 | 0 | 0 |
attr |
PunicaWrapperXPU.max_loras |
1 | 0 | 0 |
attr |
PunicaWrapperXPU.token_mapping_meta |
1 | 0 | 0 |
vllm.lora.request (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LoRARequest.post_init |
1 | 0 | 0 |
prop |
LoRARequest.adapter_id |
1 | 0 | 0 |
prop |
LoRARequest.name |
1 | 0 | 0 |
prop |
LoRARequest.path |
1 | 0 | 0 |
vllm.lora.resolver (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
attr |
LoRAResolverRegistry |
1 | 0 | 0 |
vllm.lora.utils (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_lora_id |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.lora.worker_manager (11 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
WorkerLoRAManager.init |
5 | 4 | 0 |
meth |
WorkerLoRAManager.dummy_lora_cache |
1 | 0 | 0 |
meth |
WorkerLoRAManager.create_lora_manager |
3 | 3 | 1 |
meth |
WorkerLoRAManager.add_adapter |
2 | 2 | 1 |
meth |
WorkerLoRAManager.remove_all_adapters |
1 | 0 | 0 |
attr |
WorkerLoRAManager.embedding_modules |
1 | 0 | 0 |
attr |
WorkerLoRAManager.max_num_seqs |
1 | 0 | 0 |
attr |
WorkerLoRAManager.max_num_batched_tokens |
1 | 0 | 0 |
attr |
WorkerLoRAManager.vocab_size |
1 | 0 | 0 |
attr |
WorkerLoRAManager.lora_config |
1 | 0 | 0 |
attr |
WorkerLoRAManager.max_position_embeddings |
1 | 0 | 0 |
attr |
WorkerLoRAManager.device |
1 | 0 | 0 |
meth |
LRUCacheWorkerLoRAManager.create_lora_manager |
3 | 3 | 1 |
vllm.model_executor.custom_op (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CustomOp.new |
3 | 0 | 0 |
meth |
CustomOp.init |
3 | 2 | 0 |
meth |
CustomOp.forward |
3 | 0 | 0 |
meth |
CustomOp.forward_native |
3 | 0 | 0 |
meth |
CustomOp.forward_cuda |
3 | 0 | 0 |
meth |
CustomOp.forward_hip |
3 | 0 | 0 |
meth |
CustomOp.forward_xpu |
3 | 0 | 0 |
meth |
CustomOp.forward_cpu |
3 | 0 | 0 |
meth |
CustomOp.forward_tpu |
3 | 0 | 0 |
meth |
CustomOp.forward_oot |
3 | 0 | 0 |
meth |
CustomOp.dispatch_forward |
2 | 1 | 0 |
meth |
CustomOp.maybe_compile |
3 | 1 | 0 |
meth |
CustomOp.register |
3 | 2 | 0 |
meth |
CustomOp.register_oot |
3 | 1 | 0 |
meth |
PluggableLayer.new |
3 | 0 | 0 |
meth |
PluggableLayer.register |
2 | 1 | 0 |
meth |
PluggableLayer.register_oot |
3 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.kernels.linear.mixed_precision.MPLinearKernel (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MPLinearKernel.config |
1 | 0 | 0 |
attr |
MPLinearKernel.w_q_name |
1 | 0 | 0 |
attr |
MPLinearKernel.w_s_name |
1 | 0 | 0 |
attr |
MPLinearKernel.w_zp_name |
1 | 0 | 0 |
attr |
MPLinearKernel.w_gidx_name |
1 | 0 | 0 |
vllm.model_executor.kernels.linear.mixed_precision.cpu (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CPUWNA16LinearKernel._process_gptq_weights |
2 | 1 | 0 |
meth |
CPUWNA16LinearKernel.process_weights_after_loading |
2 | 1 | 0 |
vllm.model_executor.kernels.linear.mixed_precision.cutlass (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CutlassW4A8LinearKernel.init |
3 | 0 | 0 |
meth |
CutlassW4A8LinearKernel.process_weights_after_loading |
2 | 1 | 0 |
attr |
CutlassW4A8LinearKernel.quant_fp8 |
1 | 0 | 0 |
vllm.model_executor.kernels.linear.mixed_precision.dynamic_4bit (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Dynamic4bitLinearKernel.process_weights_after_loading |
2 | 1 | 0 |
vllm.model_executor.kernels.linear.mixed_precision.exllama (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ExllamaLinearKernel.process_weights_after_loading |
2 | 1 | 0 |
vllm.model_executor.kernels.linear.mixed_precision.machete (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MacheteLinearKernel.process_weights_after_loading |
2 | 1 | 0 |
vllm.model_executor.kernels.linear.mixed_precision.xpu (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XPUwNa16LinearKernel.process_weights_after_loading |
2 | 1 | 0 |
vllm.model_executor.kernels.linear.scaled_mm.ScaledMMLinearKernel (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Int8ScaledMMLinearKernel._get_layer_params |
2 | 1 | 0 |
meth |
FP8ScaledMMLinearKernel._get_layer_params |
2 | 1 | 0 |
attr |
FP8ScaledMMLinearKernel.quant_fp8 |
1 | 0 | 0 |
attr |
FP8ScaledMMLinearKernel.fp8_dtype |
1 | 0 | 0 |
meth |
ScaledMMLinearKernel._get_layer_params |
2 | 1 | 0 |
attr |
ScaledMMLinearKernel.config |
1 | 0 | 0 |
attr |
ScaledMMLinearKernel.layer_param_names |
1 | 0 | 0 |
vllm.model_executor.kernels.linear.scaled_mm.xpu (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
XPUFP8ScaledMMLinearKernel.config |
1 | 0 | 0 |
attr |
XPUFP8ScaledMMLinearKernel.layer_param_names |
1 | 0 | 0 |
vllm.model_executor.layers.activation (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
SiluAndMul.init |
2 | 1 | 0 |
attr |
SiluAndMul.op |
1 | 0 | 0 |
func |
swiglustep_and_mul_triton |
4 | 3 | 0 |
meth |
SwigluOAIAndMul.init |
3 | 2 | 0 |
attr |
SwigluOAIAndMul.alpha |
1 | 0 | 0 |
attr |
SwigluOAIAndMul.limit |
1 | 0 | 0 |
meth |
QuickGELU.init |
1 | 0 | 0 |
attr |
QuickGELU.op |
1 | 0 | 0 |
meth |
FastGELU.init |
1 | 0 | 0 |
attr |
FastGELU.op |
1 | 0 | 0 |
meth |
GeluAndMul.init |
2 | 1 | 0 |
attr |
GeluAndMul.approximate |
1 | 0 | 0 |
attr |
GeluAndMul.op |
1 | 0 | 0 |
meth |
SwigluStepAndMul.init |
2 | 1 | 0 |
attr |
SwigluStepAndMul.limit |
1 | 0 | 0 |
meth |
ScaledActivation.init |
5 | 4 | 0 |
meth |
ScaledActivation.weight_loader |
3 | 2 | 0 |
attr |
ScaledActivation.act |
1 | 0 | 0 |
attr |
ScaledActivation.input_is_parallel |
1 | 0 | 0 |
attr |
ScaledActivation.scales |
1 | 0 | 0 |
meth |
MulAndSilu.init |
1 | 0 | 0 |
attr |
MulAndSilu.op |
1 | 0 | 0 |
meth |
NewGELU.init |
1 | 0 | 0 |
attr |
NewGELU.op |
1 | 0 | 0 |
meth |
GeluAndMulSparse.init |
3 | 2 | 0 |
attr |
GeluAndMulSparse.approximate |
1 | 0 | 0 |
attr |
GeluAndMulSparse.std_multiplier |
1 | 0 | 0 |
meth |
FatreluAndMul.init |
2 | 1 | 0 |
attr |
FatreluAndMul.threshold |
1 | 0 | 0 |
attr |
FatreluAndMul.op |
1 | 0 | 0 |
meth |
XIELU.init |
7 | 6 | 0 |
attr |
XIELU.alpha_p |
1 | 0 | 0 |
attr |
XIELU.alpha_n |
1 | 0 | 0 |
attr |
XIELU.with_vector_loads |
1 | 0 | 0 |
vllm.model_executor.layers.attention.attention (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Attention.init |
17 | 16 | 0 |
meth |
Attention.calc_kv_scales |
4 | 0 | 0 |
meth |
Attention.process_weights_after_loading |
2 | 1 | 0 |
attr |
Attention.kv_cache_torch_dtype |
1 | 0 | 0 |
attr |
Attention.kv_cache_dtype |
1 | 0 | 0 |
attr |
Attention.calculate_kv_scales |
1 | 0 | 0 |
attr |
Attention.quant_config |
1 | 0 | 0 |
attr |
Attention.layer_name |
1 | 0 | 0 |
attr |
Attention.num_heads |
1 | 0 | 0 |
attr |
Attention.head_size |
1 | 0 | 0 |
attr |
Attention.head_size_v |
1 | 0 | 0 |
attr |
Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Attention.sliding_window |
1 | 0 | 0 |
attr |
Attention.has_sink |
1 | 0 | 0 |
attr |
Attention.use_mm_prefix |
1 | 0 | 0 |
attr |
Attention.use_alibi_sqrt |
1 | 0 | 0 |
attr |
Attention.impl |
1 | 0 | 0 |
attr |
Attention.backend |
1 | 0 | 0 |
attr |
Attention.dtype |
1 | 0 | 0 |
attr |
Attention.use_direct_call |
1 | 0 | 0 |
attr |
Attention.use_output |
1 | 0 | 0 |
attr |
Attention.attn_type |
1 | 0 | 0 |
attr |
Attention.kv_sharing_target_layer_name |
1 | 0 | 0 |
attr |
Attention.kv_cache |
1 | 0 | 0 |
attr |
Attention.query_quant |
1 | 0 | 0 |
attr |
Attention.attn_backend |
1 | 0 | 0 |
func |
validate_kv_sharing_target |
4 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.attention.chunked_local_attention (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChunkedLocalAttention.init |
11 | 10 | 0 |
attr |
ChunkedLocalAttention.attention_chunk_size |
1 | 0 | 0 |
vllm.model_executor.layers.attention.cross_attention (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CrossAttention.init |
7 | 5 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.attention.encoder_only_attention (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EncoderOnlyAttention.init |
7 | 5 | 0 |
vllm.model_executor.layers.attention.mla_attention (119 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MLAAttention.init |
15 | 13 | 0 |
meth |
MLAAttention.process_weights_after_loading |
2 | 1 | 0 |
meth |
MLAAttention._v_up_proj |
3 | 2 | 0 |
attr |
MLAAttention.num_heads |
1 | 0 | 0 |
attr |
MLAAttention.scale |
1 | 0 | 0 |
attr |
MLAAttention.qk_nope_head_dim |
1 | 0 | 0 |
attr |
MLAAttention.qk_rope_head_dim |
1 | 0 | 0 |
attr |
MLAAttention.v_head_dim |
1 | 0 | 0 |
attr |
MLAAttention.q_lora_rank |
1 | 0 | 0 |
attr |
MLAAttention.kv_lora_rank |
1 | 0 | 0 |
attr |
MLAAttention.kv_b_proj |
1 | 0 | 0 |
attr |
MLAAttention.head_size |
1 | 0 | 0 |
attr |
MLAAttention.layer_name |
1 | 0 | 0 |
attr |
MLAAttention.indexer |
1 | 0 | 0 |
attr |
MLAAttention.num_kv_heads |
1 | 0 | 0 |
attr |
MLAAttention.qk_head_dim |
1 | 0 | 0 |
attr |
MLAAttention.quant_config |
1 | 0 | 0 |
attr |
MLAAttention.kv_cache_dtype |
1 | 0 | 0 |
attr |
MLAAttention.calculate_kv_scales |
1 | 0 | 0 |
attr |
MLAAttention.attn_backend |
1 | 0 | 0 |
attr |
MLAAttention.impl |
1 | 0 | 0 |
attr |
MLAAttention.q_pad_num_heads |
1 | 0 | 0 |
attr |
MLAAttention.use_direct_call |
1 | 0 | 0 |
attr |
MLAAttention.kv_cache |
1 | 0 | 0 |
attr |
MLAAttention.use_sparse |
1 | 0 | 0 |
attr |
MLAAttention.q_range |
1 | 0 | 0 |
attr |
MLAAttention.k_range |
1 | 0 | 0 |
attr |
MLAAttention.v_range |
1 | 0 | 0 |
attr |
MLAAttention.is_aiter_triton_fp8_bmm_enabled |
1 | 0 | 0 |
attr |
MLAAttention.is_aiter_triton_fp4_bmm_enabled |
1 | 0 | 0 |
attr |
MLAAttention.chunked_prefill_workspace_size |
1 | 0 | 0 |
meth |
MLACommonImpl._flash_attn_varlen_diff_headdims |
7 | 0 | 0 |
meth |
MLACommonImpl._run_prefill_new_tokens_fa |
6 | 1 | 0 |
meth |
MLACommonImpl._run_prefill_new_tokens_fi |
6 | 1 | 0 |
meth |
MLACommonImpl._run_prefill_new_tokens_cudnn |
6 | 1 | 0 |
meth |
MLACommonImpl._run_prefill_context_chunk_fa |
6 | 2 | 0 |
meth |
MLACommonImpl._run_prefill_context_chunk_fi |
6 | 2 | 0 |
meth |
MLACommonImpl._run_prefill_context_chunk_cudnn |
6 | 2 | 0 |
meth |
MLACommonImpl._run_prefill_new_tokens_trtllm_ragged |
6 | 1 | 0 |
meth |
MLACommonImpl._run_prefill_context_chunk_trtllm_ragged |
6 | 2 | 0 |
meth |
MLACommonImpl._compute_prefill_context |
5 | 4 | 0 |
meth |
MLACommonImpl._context_parallel_compute_prefill_context |
6 | 5 | 0 |
attr |
MLACommonImpl.num_heads |
1 | 0 | 0 |
attr |
MLACommonImpl.head_size |
1 | 0 | 0 |
attr |
MLACommonImpl.scale |
1 | 0 | 0 |
attr |
MLACommonImpl.num_kv_heads |
1 | 0 | 0 |
attr |
MLACommonImpl.kv_cache_dtype |
1 | 0 | 0 |
attr |
MLACommonImpl.q_lora_rank |
1 | 0 | 0 |
attr |
MLACommonImpl.kv_lora_rank |
1 | 0 | 0 |
attr |
MLACommonImpl.qk_nope_head_dim |
1 | 0 | 0 |
attr |
MLACommonImpl.qk_rope_head_dim |
1 | 0 | 0 |
attr |
MLACommonImpl.qk_head_dim |
1 | 0 | 0 |
attr |
MLACommonImpl.v_head_dim |
1 | 0 | 0 |
attr |
MLACommonImpl.kv_b_proj |
1 | 0 | 0 |
attr |
MLACommonImpl.indexer |
1 | 0 | 0 |
attr |
MLACommonImpl.q_pad_num_heads |
1 | 0 | 0 |
attr |
MLACommonImpl.supports_quant_query_input |
1 | 0 | 0 |
attr |
MLACommonImpl.flash_attn_varlen_func |
1 | 0 | 0 |
attr |
MLACommonImpl.vllm_flash_attn_version |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
MLACommonMetadata.post_init |
1 | 0 | 0 |
meth |
MLACommonMetadataBuilder.init |
7 | 6 | 0 |
meth |
MLACommonMetadataBuilder._build_fi_prefill_wrappers |
2 | 1 | 0 |
attr |
MLACommonMetadataBuilder.metadata_cls |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.kv_cache_spec |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.model_config |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.compilation_config |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.vllm_config |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.device |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.num_heads |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.mla_dims |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.aot_schedule |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.q_data_type |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.dcp_local_block_size |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.dcp_virtual_block_size |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.cp_kv_cache_interleave_size |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.chunked_prefill_workspace_size |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.prefill_metadata_cls |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.dcp_world_size |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.dcp_rank |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.page_size |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.chunked_prefill_workspace |
1 | 0 | 0 |
attr |
MLACommonMetadataBuilder.cudnn_workspace |
1 | 0 | 0 |
func |
dynamic_per_batched_tensor_quant |
3 | 2 | 0 |
vllm.model_executor.layers.attention.mm_encoder_attention (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MMEncoderAttention.num_heads |
1 | 0 | 0 |
attr |
MMEncoderAttention.head_size |
1 | 0 | 0 |
attr |
MMEncoderAttention.scale |
1 | 0 | 0 |
attr |
MMEncoderAttention.num_kv_heads |
1 | 0 | 0 |
attr |
MMEncoderAttention.layer_name |
1 | 0 | 0 |
attr |
MMEncoderAttention.num_queries_per_kv |
1 | 0 | 0 |
attr |
MMEncoderAttention.attn_backend |
1 | 0 | 0 |
attr |
MMEncoderAttention.is_flash_attn_backend |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.attention.static_sink_attention (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
StaticSinkAttention.init |
8 | 6 | 0 |
meth |
StaticSinkAttention.update_sink_kv |
3 | 1 | 0 |
meth |
StaticSinkAttention.forward |
3 | 0 | 0 |
meth |
StaticSinkAttention.populate_sink_kv |
2 | 0 | 0 |
attr |
StaticSinkAttention.sink_len |
1 | 0 | 0 |
attr |
StaticSinkAttention.block_size |
1 | 0 | 0 |
attr |
StaticSinkAttention.sink_populated |
1 | 0 | 0 |
attr |
StaticSinkAttention.sink_key |
1 | 0 | 0 |
attr |
StaticSinkAttention.sink_value |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.batch_invariant (74 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
mm_batch_invariant |
3 | 0 | 0 |
func |
mean_kernel |
12 | 1 | 0 |
func |
matmul_kernel_persistent |
23 | 9 | 0 |
func |
bmm_batch_invariant |
4 | 0 | 0 |
func |
mean_batch_invariant |
5 | 1 | 0 |
func |
init_batch_invariance |
2 | 1 | 0 |
func |
matmul_persistent |
4 | 3 | 0 |
func |
linear_batch_invariant |
4 | 0 | 0 |
func |
addmm_batch_invariant |
4 | 0 | 0 |
func |
softmax_batch_invariant |
4 | 0 | 0 |
func |
override_envs_for_invariance |
2 | 1 | 0 |
func |
matmul_batch_invariant |
4 | 0 | 0 |
func |
enable_batch_invariant_mode |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
bmm_kernel |
23 | 6 | 0 |
vllm.model_executor.layers.conv (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
ConvLayerBase.in_channels |
1 | 0 | 0 |
attr |
ConvLayerBase.out_channels |
1 | 0 | 0 |
attr |
ConvLayerBase.kernel_size |
1 | 0 | 0 |
attr |
ConvLayerBase.stride |
1 | 0 | 0 |
attr |
ConvLayerBase.padding |
1 | 0 | 0 |
attr |
ConvLayerBase.dilation |
1 | 0 | 0 |
attr |
ConvLayerBase.groups |
1 | 0 | 0 |
attr |
ConvLayerBase.padding_mode |
1 | 0 | 0 |
attr |
ConvLayerBase.enable_linear |
1 | 0 | 0 |
attr |
ConvLayerBase.input_size |
1 | 0 | 0 |
attr |
ConvLayerBase.weight |
1 | 0 | 0 |
attr |
ConvLayerBase.bias |
1 | 0 | 0 |
vllm.model_executor.layers.fla.ops.chunk (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
chunk_gated_delta_rule |
11 | 10 | 0 |
func |
chunk_gated_delta_rule_fwd |
10 | 9 | 0 |
meth |
ChunkGatedDeltaRuleFunction.forward |
12 | 10 | 0 |
vllm.model_executor.layers.fla.ops.chunk_delta_h (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
chunk_gated_delta_rule_fwd_kernel_h_blockdim64 |
25 | 12 | 0 |
vllm.model_executor.layers.fla.ops.chunk_o (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
chunk_fwd_kernel_o |
20 | 9 | 0 |
attr |
BKV_LIST |
1 | 0 | 0 |
vllm.model_executor.layers.fla.ops.chunk_scaled_dot_kkt (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
chunk_scaled_dot_kkt_fwd_kernel |
15 | 7 | 0 |
vllm.model_executor.layers.fla.ops.cumsum (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
BS_LIST |
1 | 0 | 0 |
func |
chunk_local_cumsum_vector_kernel |
14 | 8 | 0 |
func |
chunk_local_cumsum_scalar_kernel |
12 | 6 | 0 |
func |
chunk_local_cumsum |
8 | 7 | 0 |
vllm.model_executor.layers.fla.ops.fused_recurrent (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
fused_recurrent_gated_delta_rule_fwd_kernel |
34 | 21 | 0 |
meth |
FusedRecurrentFunction.forward |
14 | 12 | 0 |
vllm.model_executor.layers.fla.ops.kda (89 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
rms_norm_gated |
10 | 9 | 0 |
func |
chunk_gla_fwd_o_gk |
10 | 9 | 0 |
func |
chunk_gla_fwd_kernel_o |
18 | 7 | 0 |
func |
layer_norm_gated_fwd |
11 | 10 | 0 |
func |
chunk_kda_fwd |
10 | 9 | 0 |
func |
layer_norm_gated_fwd_kernel1 |
19 | 8 | 0 |
func |
recompute_w_u_fwd_kernel |
24 | 10 | 0 |
func |
chunk_kda_scaled_dot_kkt_fwd_kernel_intra_sub_inter |
18 | 7 | 0 |
func |
kda_gate_fwd_kernel |
13 | 6 | 0 |
func |
fused_recurrent_kda |
13 | 12 | 0 |
func |
chunk_kda |
12 | 10 | 0 |
attr |
FusedRMSNormGated.hidden_size |
1 | 0 | 0 |
attr |
FusedRMSNormGated.elementwise_affine |
1 | 0 | 0 |
attr |
FusedRMSNormGated.eps |
1 | 0 | 0 |
attr |
FusedRMSNormGated.activation |
1 | 0 | 0 |
attr |
FusedRMSNormGated.weight |
1 | 0 | 0 |
func |
layer_norm_gated_fwd_kernel |
21 | 9 | 0 |
func |
chunk_kda_scaled_dot_kkt_fwd_kernel_intra_sub_intra |
17 | 6 | 0 |
vllm.model_executor.layers.fla.ops.l2norm (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
l2norm_fwd_kernel2 |
7 | 2 | 0 |
func |
l2norm_fwd_kernel |
9 | 3 | 0 |
attr |
USE_DEFAULT_FLA_NORM |
1 | 0 | 0 |
func |
l2norm_fwd_kernel1 |
6 | 1 | 0 |
func |
l2norm_fwd |
4 | 3 | 0 |
vllm.model_executor.layers.fla.ops.layernorm_guard (72 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RMSNormGated.init |
8 | 6 | 0 |
meth |
RMSNormGated.reset_parameters |
1 | 0 | 0 |
meth |
RMSNormGated.forward |
3 | 0 | 0 |
attr |
RMSNormGated.eps |
1 | 0 | 0 |
attr |
RMSNormGated.activation |
1 | 0 | 0 |
attr |
RMSNormGated.weight |
1 | 0 | 0 |
attr |
RMSNormGated.group_size |
1 | 0 | 0 |
attr |
RMSNormGated.norm_before_gate |
1 | 0 | 0 |
func |
layer_norm_fwd |
11 | 10 | 0 |
meth |
LayerNormGated.__init__ |
7 | 5 | 0 |
meth |
LayerNormGated.reset_parameters |
1 | 0 | 0 |
meth |
LayerNormGated.forward |
3 | 0 | 0 |
attr |
LayerNormGated.eps |
1 | 0 | 0 |
attr |
LayerNormGated.weight |
1 | 0 | 0 |
attr |
LayerNormGated.bias |
1 | 0 | 0 |
attr |
LayerNormGated.group_size |
1 | 0 | 0 |
attr |
LayerNormGated.norm_before_gate |
1 | 0 | 0 |
func |
rms_norm_ref |
9 | 0 | 0 |
meth |
LayerNormFn.forward |
11 | 1 | 0 |
func |
layernorm_fn |
10 | 1 | 0 |
func |
rmsnorm_fn |
9 | 1 | 0 |
func |
layer_norm_fwd_kernel |
21 | 8 | 0 |
vllm.model_executor.layers.fla.ops.op (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
gather |
5 | 0 | 0 |
func |
make_tensor_descriptor |
6 | 0 | 0 |
vllm.model_executor.layers.fla.ops.solve_tril (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
merge_16x16_to_64x64_inverse_kernel |
11 | 5 | 0 |
attr |
FLA_TRIL_PRECISION |
1 | 0 | 0 |
func |
merge_16x16_to_32x32_inverse_kernel |
11 | 5 | 0 |
func |
solve_tril_16x16_kernel |
11 | 5 | 0 |
vllm.model_executor.layers.fla.ops.utils (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
SUPPRESS_LEVEL |
1 | 0 | 0 |
func |
get_all_max_shared_mem |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
device |
1 | 0 | 0 |
attr |
device_platform |
1 | 0 | 0 |
attr |
is_nvidia_hopper |
1 | 0 | 0 |
attr |
is_gather_supported |
1 | 0 | 0 |
attr |
FLA_CI_ENV |
1 | 0 | 0 |
attr |
is_intel_alchemist |
1 | 0 | 0 |
attr |
FLA_GDN_FIX_BT |
1 | 0 | 0 |
attr |
use_cuda_graph |
1 | 0 | 0 |
attr |
COMPILER_MODE |
1 | 0 | 0 |
attr |
is_tma_supported |
1 | 0 | 0 |
attr |
device_torch_lib |
1 | 0 | 0 |
vllm.model_executor.layers.fla.ops.wy_fast (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
recompute_w_u_fwd_kernel |
19 | 8 | 0 |
vllm.model_executor.layers.fused_moe (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
override_config |
2 | 0 | 0 |
attr |
fused_topk |
1 | 0 | 0 |
attr |
fused_experts |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.all2all_utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.batched_deep_gemm_moe (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BatchedDeepGemmExperts.__init__ |
5 | 4 | 0 |
meth |
BatchedDeepGemmExperts.apply |
16 | 15 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
persistent_masked_m_silu_mul_quant |
6 | 5 | 0 |
vllm.model_executor.layers.fused_moe.config (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FusedMoEConfig.__post_init__ |
1 | 0 | 0 |
prop |
FusedMoEConfig.tp_size |
1 | 0 | 0 |
prop |
FusedMoEConfig.dp_size |
1 | 0 | 0 |
prop |
FusedMoEConfig.pcp_size |
1 | 0 | 0 |
prop |
FusedMoEConfig.ep_size |
1 | 0 | 0 |
prop |
FusedMoEConfig.sp_size |
1 | 0 | 0 |
prop |
FusedMoEConfig.is_sequence_parallel |
1 | 0 | 0 |
prop |
FusedMoEConfig.tp_rank |
1 | 0 | 0 |
prop |
FusedMoEConfig.dp_rank |
1 | 0 | 0 |
prop |
FusedMoEConfig.pcp_rank |
1 | 0 | 0 |
prop |
FusedMoEConfig.ep_rank |
1 | 0 | 0 |
prop |
FusedMoEConfig.use_ep |
1 | 0 | 0 |
prop |
FusedMoEConfig.use_deepep_ht_kernels |
1 | 0 | 0 |
prop |
FusedMoEConfig.use_deepep_ll_kernels |
1 | 0 | 0 |
prop |
FusedMoEConfig.use_mori_kernels |
1 | 0 | 0 |
prop |
FusedMoEConfig.use_fi_all2allv_kernels |
1 | 0 | 0 |
prop |
FusedMoEConfig.use_naive_all2all_kernels |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
gptq_marlin_moe_quant_config |
9 | 8 | 0 |
prop |
FusedMoEParallelConfig.use_all2all_kernels |
1 | 0 | 0 |
prop |
FusedMoEParallelConfig.use_deepep_ht_kernels |
1 | 0 | 0 |
prop |
FusedMoEParallelConfig.use_deepep_ll_kernels |
1 | 0 | 0 |
prop |
FusedMoEParallelConfig.use_fi_all2allv_kernels |
1 | 0 | 0 |
prop |
FusedMoEParallelConfig.use_batched_activation_format |
1 | 0 | 0 |
prop |
FusedMoEParallelConfig.use_naive_all2all_kernels |
1 | 0 | 0 |
prop |
FusedMoEParallelConfig.use_mori_kernels |
1 | 0 | 0 |
meth |
FusedMoEQuantConfig.__post_init__ |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.cpu_fused_moe (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
CPUFusedMOE.isa |
1 | 0 | 0 |
attr |
CPUFusedMOE.forward_method |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.cutlass_moe (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CutlassExpertsW4A8Fp8.__init__ |
13 | 12 | 0 |
meth |
CutlassExpertsW4A8Fp8.apply |
16 | 15 | 0 |
attr |
CutlassExpertsW4A8Fp8.out_dtype |
1 | 0 | 0 |
attr |
CutlassExpertsW4A8Fp8.a_strides1 |
1 | 0 | 0 |
attr |
CutlassExpertsW4A8Fp8.a_strides2 |
1 | 0 | 0 |
attr |
CutlassExpertsW4A8Fp8.b_strides1 |
1 | 0 | 0 |
attr |
CutlassExpertsW4A8Fp8.b_strides2 |
1 | 0 | 0 |
attr |
CutlassExpertsW4A8Fp8.c_strides1 |
1 | 0 | 0 |
attr |
CutlassExpertsW4A8Fp8.c_strides2 |
1 | 0 | 0 |
attr |
CutlassExpertsW4A8Fp8.s_strides1 |
1 | 0 | 0 |
attr |
CutlassExpertsW4A8Fp8.s_strides2 |
1 | 0 | 0 |
attr |
CutlassExpertsW4A8Fp8.group_size |
1 | 0 | 0 |
func |
run_cutlass_moe_fp8 |
25 | 24 | 0 |
func |
run_cutlass_moe_w4a8_fp8 |
32 | 31 | 0 |
meth |
CutlassExpertsFp8Base.__init__ |
5 | 4 | 0 |
meth |
CutlassExpertsFp8Base.apply |
16 | 15 | 0 |
attr |
CutlassExpertsFp8Base.out_dtype |
1 | 0 | 0 |
attr |
CutlassExpertsFp8Base.ab_strides1 |
1 | 0 | 0 |
attr |
CutlassExpertsFp8Base.ab_strides2 |
1 | 0 | 0 |
attr |
CutlassExpertsFp8Base.c_strides1 |
1 | 0 | 0 |
attr |
CutlassExpertsFp8Base.c_strides2 |
1 | 0 | 0 |
attr |
FLOAT4_E2M1_MAX |
1 | 0 | 0 |
meth |
CutlassExpertsFp4.apply |
16 | 15 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.deep_gemm_moe (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepGemmExperts.__init__ |
3 | 2 | 0 |
meth |
DeepGemmExperts.apply |
16 | 15 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.deep_gemm_utils (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
apply_expert_map |
3 | 0 | 0 |
func |
ep_gather |
7 | 6 | 0 |
func |
ep_scatter |
11 | 10 | 0 |
func |
deepgemm_moe_permute |
8 | 7 | 0 |
func |
compute_aligned_M |
6 | 5 | 0 |
func |
deepgemm_unpermute_and_reduce |
7 | 6 | 0 |
vllm.model_executor.layers.fused_moe.deepep_ht_prepare_finalize (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepEPHTPrepareAndFinalize.__init__ |
5 | 4 | 0 |
attr |
DeepEPHTPrepareAndFinalize.buffer |
1 | 0 | 0 |
attr |
DeepEPHTPrepareAndFinalize.num_dispatchers_ |
1 | 0 | 0 |
attr |
DeepEPHTPrepareAndFinalize.dp_size |
1 | 0 | 0 |
attr |
DeepEPHTPrepareAndFinalize.rank_expert_offset |
1 | 0 | 0 |
attr |
DeepEPHTPrepareAndFinalize.async_prepare |
1 | 0 | 0 |
attr |
DeepEPHTPrepareAndFinalize.handles |
1 | 0 | 0 |
attr |
DeepEPHTPrepareAndFinalize.available_rank_configs |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.deepep_ll_prepare_finalize (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepEPLLPrepareAndFinalize.__init__ |
8 | 7 | 0 |
meth |
DeepEPLLPrepareAndFinalize.post_init_setup |
2 | 1 | 0 |
attr |
DeepEPLLPrepareAndFinalize.buffer |
1 | 0 | 0 |
attr |
DeepEPLLPrepareAndFinalize.max_tokens_per_rank |
1 | 0 | 0 |
attr |
DeepEPLLPrepareAndFinalize.use_fp8_dispatch |
1 | 0 | 0 |
attr |
DeepEPLLPrepareAndFinalize.num_dispatchers_ |
1 | 0 | 0 |
attr |
DeepEPLLPrepareAndFinalize.global_to_physical |
1 | 0 | 0 |
attr |
DeepEPLLPrepareAndFinalize.physical_to_global |
1 | 0 | 0 |
attr |
DeepEPLLPrepareAndFinalize.local_expert_global_ids |
1 | 0 | 0 |
attr |
DeepEPLLPrepareAndFinalize.use_ue8m0_dispatch |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.experts.trtllm_fp8_moe (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TrtLlmFp8Experts.__init__ |
3 | 2 | 0 |
attr |
TrtLlmFp8Experts.routing_method_type |
1 | 0 | 0 |
attr |
TrtLlmFp8Experts.topk |
1 | 0 | 0 |
attr |
TrtLlmFp8Experts.intermediate_size_per_partition |
1 | 0 | 0 |
attr |
TrtLlmFp8Experts.hidden_dim |
1 | 0 | 0 |
attr |
TrtLlmFp8Experts.local_num_experts |
1 | 0 | 0 |
attr |
TrtLlmFp8Experts.ep_rank |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.experts.trtllm_nvfp4_moe (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TrtLlmNvFp4ExpertsModular.apply |
16 | 15 | 0 |
meth |
TrtLlmNvFp4ExpertsBase.__init__ |
3 | 2 | 0 |
attr |
TrtLlmNvFp4ExpertsBase.moe_config |
1 | 0 | 0 |
attr |
TrtLlmNvFp4ExpertsBase.quant_config |
1 | 0 | 0 |
attr |
TrtLlmNvFp4ExpertsBase.routing_method_type |
1 | 0 | 0 |
attr |
TrtLlmNvFp4ExpertsBase.topk |
1 | 0 | 0 |
attr |
TrtLlmNvFp4ExpertsBase.intermediate_size_per_partition |
1 | 0 | 0 |
attr |
TrtLlmNvFp4ExpertsBase.hidden_dim |
1 | 0 | 0 |
attr |
TrtLlmNvFp4ExpertsBase.local_num_experts |
1 | 0 | 0 |
attr |
TrtLlmNvFp4ExpertsBase.ep_rank |
1 | 0 | 0 |
attr |
TrtLlmNvFp4ExpertsBase.g1_scale_c |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.fallback (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FallbackExperts.__init__ |
3 | 2 | 0 |
meth |
FallbackExperts.apply |
16 | 15 | 0 |
attr |
FallbackExperts.fallback_experts |
1 | 0 | 0 |
attr |
FallbackExperts.experts |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.flashinfer_a2a_prepare_finalize (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_local_sizes |
1 | 0 | 0 |
func |
flashinfer_alltoall_dispatch |
11 | 10 | 0 |
meth |
FlashInferA2APrepareAndFinalize.__init__ |
2 | 1 | 0 |
attr |
FlashInferA2APrepareAndFinalize.num_dispatchers_ |
1 | 0 | 0 |
attr |
FlashInferA2APrepareAndFinalize.all2all_manager |
1 | 0 | 0 |
func |
flashinfer_alltoall_combine |
6 | 4 | 0 |
vllm.model_executor.layers.fused_moe.flashinfer_cutedsl_moe (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
flashinfer_cutedsl_moe_masked |
13 | 10 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
FlashInferCuteDSLExperts.__init__ |
5 | 4 | 0 |
meth |
FlashInferCuteDSLExperts.apply |
16 | 15 | 0 |
attr |
FlashInferCuteDSLExperts.out_dtype |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.flashinfer_cutlass_moe (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
FlashInferExperts.__init__ |
3 | 2 | 0 |
meth |
FlashInferExperts.apply |
16 | 15 | 0 |
attr |
FlashInferExperts.device |
1 | 0 | 0 |
attr |
FlashInferExperts.num_experts |
1 | 0 | 0 |
attr |
FlashInferExperts.ep_rank |
1 | 0 | 0 |
attr |
FlashInferExperts.ep_size |
1 | 0 | 0 |
attr |
FlashInferExperts.tp_rank |
1 | 0 | 0 |
attr |
FlashInferExperts.tp_size |
1 | 0 | 0 |
attr |
FlashInferExperts.out_dtype |
1 | 0 | 0 |
attr |
FlashInferExperts.use_dp |
1 | 0 | 0 |
attr |
FlashInferExperts.use_deepseek_fp8_block_scale |
1 | 0 | 0 |
attr |
FlashInferExperts.max_capture_size |
1 | 0 | 0 |
attr |
FlashInferExperts.gemm1_alpha |
1 | 0 | 0 |
attr |
FlashInferExperts.gemm1_beta |
1 | 0 | 0 |
attr |
FlashInferExperts.gemm1_clamp_limit |
1 | 0 | 0 |
attr |
FlashInferExperts.fake_input_scale |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.fused_batched_moe (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BatchedTritonExperts.__init__ |
5 | 4 | 0 |
meth |
BatchedTritonExperts.apply |
16 | 15 | 0 |
meth |
BatchedPrepareAndFinalize.__init__ |
5 | 4 | 0 |
attr |
BatchedPrepareAndFinalize.max_num_tokens |
1 | 0 | 0 |
attr |
BatchedPrepareAndFinalize.num_local_experts |
1 | 0 | 0 |
attr |
BatchedPrepareAndFinalize.rank |
1 | 0 | 0 |
attr |
BatchedPrepareAndFinalize.num_dispatchers_ |
1 | 0 | 0 |
meth |
NaiveBatchedExperts.__init__ |
5 | 4 | 0 |
meth |
NaiveBatchedExperts.apply |
16 | 15 | 0 |
func |
moe_mmk |
28 | 17 | 0 |
func |
batched_triton_kernel |
35 | 24 | 0 |
func |
expert_triton_kernel |
33 | 19 | 0 |
func |
invoke_moe_batched_triton_kernel |
15 | 14 | 0 |
vllm.model_executor.layers.fused_moe.fused_marlin_moe (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MarlinExpertsBase.__init__ |
10 | 9 | 0 |
attr |
MarlinExpertsBase.w13_g_idx |
1 | 0 | 0 |
attr |
MarlinExpertsBase.w2_g_idx |
1 | 0 | 0 |
attr |
MarlinExpertsBase.w13_g_idx_sort_indices |
1 | 0 | 0 |
attr |
MarlinExpertsBase.w2_g_idx_sort_indices |
1 | 0 | 0 |
attr |
MarlinExpertsBase.is_k_full |
1 | 0 | 0 |
attr |
MarlinExpertsBase.input_dtype |
1 | 0 | 0 |
meth |
BatchedMarlinExperts.__init__ |
10 | 9 | 0 |
meth |
BatchedMarlinExperts.apply |
16 | 15 | 0 |
meth |
MarlinExperts.apply |
16 | 15 | 0 |
vllm.model_executor.layers.fused_moe.fused_moe (76 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TritonExperts.__init__ |
3 | 2 | 0 |
meth |
TritonExperts.apply |
16 | 15 | 0 |
meth |
TritonWNA16Experts.apply |
16 | 15 | 0 |
func |
get_moe_wna16_block_config |
10 | 9 | 0 |
func |
fused_moe_kernel |
45 | 16 | 0 |
func |
torch_vllm_outplace_fused_experts |
2 | 1 | 0 |
func |
invoke_fused_moe_wna16_triton_kernel |
17 | 16 | 0 |
func |
invoke_fused_moe_wna16_cuda_kernel |
14 | 13 | 0 |
func |
fused_moe_kernel_gptq_awq |
40 | 15 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
invoke_fused_moe_triton_kernel |
21 | 20 | 0 |
func |
should_moe_wna16_use_cuda |
5 | 4 | 0 |
func |
torch_vllm_inplace_fused_experts |
2 | 1 | 0 |
func |
write_zeros_to_output |
11 | 0 | 0 |
vllm.model_executor.layers.fused_moe.fused_moe_method_base (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FusedMoEMethodBase.__init__ |
2 | 1 | 0 |
meth |
FusedMoEMethodBase.create_weights |
7 | 5 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.fused_moe_modular_method (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FusedMoEModularMethod.__init__ |
3 | 2 | 0 |
meth |
FusedMoEModularMethod.create_weights |
7 | 5 | 0 |
attr |
FusedMoEModularMethod.moe_quant_config |
1 | 0 | 0 |
attr |
FusedMoEModularMethod.moe_kernel |
1 | 0 | 0 |
attr |
FusedMoEModularMethod.disable_expert_map |
1 | 0 | 0 |
attr |
FusedMoEModularMethod.old_quant_method |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.gpt_oss_triton_kernels_moe (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
pack_bitmatrix |
8 | 3 | 0 |
func |
triton_kernel_fused_mxfp4_w4a8_experts |
20 | 11 | 0 |
func |
triton_kernel_moe_forward |
16 | 10 | 0 |
meth |
OAITritonExperts.apply |
16 | 15 | 0 |
meth |
UnfusedOAITritonExperts.moe_sum |
3 | 2 | 0 |
meth |
UnfusedOAITritonExperts.apply |
16 | 15 | 0 |
func |
triton_kernel_fused_experts |
18 | 13 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.layer (57 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FusedMoE.__init__ |
34 | 32 | 0 |
meth |
FusedMoE._init_runner |
1 | 0 | 0 |
meth |
FusedMoE._replace_quant_method |
2 | 1 | 0 |
meth |
FusedMoE.update_expert_map |
1 | 0 | 0 |
meth |
FusedMoE._load_per_tensor_weight_scale |
5 | 4 | 0 |
meth |
FusedMoE._load_combined_w13_weight_scale |
5 | 4 | 0 |
meth |
FusedMoE._load_model_weight_or_group_weight_scale |
7 | 6 | 0 |
meth |
FusedMoE._load_per_channel_weight_scale |
6 | 5 | 0 |
meth |
FusedMoE._load_w13 |
7 | 6 | 0 |
meth |
FusedMoE._load_w2 |
6 | 5 | 0 |
meth |
FusedMoE._load_single_value |
4 | 3 | 0 |
meth |
FusedMoE._load_g_idx |
6 | 5 | 0 |
meth |
FusedMoE._init_aiter_shared_experts_topK_buffer |
3 | 2 | 0 |
meth |
FusedMoE.ensure_moe_quant_config_init |
1 | 0 | 0 |
meth |
FusedMoE.maybe_all_reduce_tensor_model_parallel |
2 | 1 | 0 |
prop |
FusedMoE.layer_id |
1 | 0 | 0 |
prop |
FusedMoE.tp_size |
1 | 0 | 0 |
prop |
FusedMoE.ep_size |
1 | 0 | 0 |
prop |
FusedMoE.tp_rank |
1 | 0 | 0 |
prop |
FusedMoE.ep_rank |
1 | 0 | 0 |
prop |
FusedMoE.use_ep |
1 | 0 | 0 |
attr |
FusedMoE.params_dtype |
1 | 0 | 0 |
attr |
FusedMoE.vllm_config |
1 | 0 | 0 |
attr |
FusedMoE.is_sequence_parallel |
1 | 0 | 0 |
attr |
FusedMoE.sp_size |
1 | 0 | 0 |
attr |
FusedMoE.global_num_experts |
1 | 0 | 0 |
attr |
FusedMoE.logical_num_experts |
1 | 0 | 0 |
attr |
FusedMoE.expert_mapping |
1 | 0 | 0 |
attr |
FusedMoE.layer_name |
1 | 0 | 0 |
attr |
FusedMoE.enable_eplb |
1 | 0 | 0 |
attr |
FusedMoE.eplb_state |
1 | 0 | 0 |
attr |
FusedMoE.rocm_aiter_fmoe_enabled |
1 | 0 | 0 |
attr |
FusedMoE.aiter_fmoe_shared_expert_enabled |
1 | 0 | 0 |
attr |
FusedMoE.num_fused_shared_experts |
1 | 0 | 0 |
attr |
FusedMoE.top_k |
1 | 0 | 0 |
attr |
FusedMoE.intermediate_size_per_partition |
1 | 0 | 0 |
attr |
FusedMoE.reduce_results |
1 | 0 | 0 |
attr |
FusedMoE.renormalize |
1 | 0 | 0 |
attr |
FusedMoE.use_grouped_topk |
1 | 0 | 0 |
attr |
FusedMoE.num_expert_group |
1 | 0 | 0 |
attr |
FusedMoE.topk_group |
1 | 0 | 0 |
attr |
FusedMoE.custom_routing_function |
1 | 0 | 0 |
attr |
FusedMoE.scoring_func |
1 | 0 | 0 |
attr |
FusedMoE.routed_scaling_factor |
1 | 0 | 0 |
attr |
FusedMoE.e_score_correction_bias |
1 | 0 | 0 |
attr |
FusedMoE.apply_router_weight_on_input |
1 | 0 | 0 |
attr |
FusedMoE.activation |
1 | 0 | 0 |
attr |
FusedMoE.router |
1 | 0 | 0 |
attr |
FusedMoE.model_type |
1 | 0 | 0 |
attr |
FusedMoE.hidden_size |
1 | 0 | 0 |
attr |
FusedMoE.quant_config |
1 | 0 | 0 |
attr |
FusedMoE.base_quant_method |
1 | 0 | 0 |
attr |
FusedMoE.use_overlapped |
1 | 0 | 0 |
attr |
FusedMoE.runner |
1 | 0 | 0 |
attr |
FusedMoE.local_num_experts |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.modular_kernel (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FusedMoEPrepareAndFinalize.post_init_setup |
2 | 1 | 0 |
meth |
FusedMoEKernelMonolithicImpl.__init__ |
3 | 2 | 0 |
attr |
FusedMoEKernelMonolithicImpl.prepare_finalize |
1 | 0 | 0 |
attr |
FusedMoEKernelMonolithicImpl.fused_experts |
1 | 0 | 0 |
meth |
FusedMoEKernel.__init__ |
6 | 5 | 0 |
meth |
FusedMoEKernel._post_init_setup |
1 | 0 | 0 |
attr |
FusedMoEKernel.shared_experts |
1 | 0 | 0 |
meth |
FusedMoEKernelModularImpl.__init__ |
6 | 5 | 0 |
attr |
FusedMoEKernelModularImpl.prepare_finalize |
1 | 0 | 0 |
attr |
FusedMoEKernelModularImpl.fused_experts |
1 | 0 | 0 |
attr |
FusedMoEKernelModularImpl.shared_experts |
1 | 0 | 0 |
attr |
FusedMoEKernelModularImpl.moe_parallel_config |
1 | 0 | 0 |
attr |
FusedMoEKernelModularImpl.inplace |
1 | 0 | 0 |
attr |
FusedMoEKernelModularImpl.is_dp_ep |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
FusedMoEExperts.__init__ |
5 | 4 | 0 |
meth |
FusedMoEExperts.enable_chunking |
1 | 0 | 0 |
attr |
FusedMoEExperts.moe_config |
1 | 0 | 0 |
attr |
FusedMoEExperts.quant_config |
1 | 0 | 0 |
attr |
FusedMoEExperts.max_num_tokens |
1 | 0 | 0 |
attr |
FusedMoEExperts.num_dispatchers |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.moe_permute_unpermute (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
moe_permute_unpermute_supported |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.mori_prepare_finalize (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoriPrepareAndFinalize.__init__ |
5 | 4 | 0 |
meth |
MoriPrepareAndFinalize.num_dispatchers |
1 | 0 | 0 |
attr |
MoriPrepareAndFinalize.mori_op |
1 | 0 | 0 |
attr |
MoriPrepareAndFinalize.num_dispatchers_ |
1 | 0 | 0 |
attr |
MoriPrepareAndFinalize.max_tokens_per_rank |
1 | 0 | 0 |
attr |
MoriPrepareAndFinalize.use_fp8_dispatch |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.oracle.fp8 (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.oracle.nvfp4 (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.oracle.unquantized (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.prepare_finalize.naive_dp_ep (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MoEPrepareAndFinalizeNaiveDPEPMonolithic.is_sequence_parallel |
1 | 0 | 0 |
attr |
MoEPrepareAndFinalizeNaiveDPEPModular.is_sequence_parallel |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AiterExperts.supports_expert_map |
1 | 0 | 0 |
meth |
AiterExperts.supports_chunking |
1 | 0 | 0 |
meth |
AiterExperts.apply |
16 | 15 | 0 |
func |
init_aiter_topK_meta_data |
9 | 8 | 0 |
vllm.model_executor.layers.fused_moe.routed_experts_capturer (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.router.base_router (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseRouter.__init__ |
6 | 5 | 0 |
attr |
BaseRouter.top_k |
1 | 0 | 0 |
attr |
BaseRouter.global_num_experts |
1 | 0 | 0 |
attr |
BaseRouter.eplb_state |
1 | 0 | 0 |
attr |
BaseRouter.enable_eplb |
1 | 0 | 0 |
attr |
BaseRouter.indices_type_getter |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.router.custom_routing_router (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CustomRoutingRouter.__init__ |
8 | 7 | 0 |
attr |
CustomRoutingRouter.custom_routing_function |
1 | 0 | 0 |
attr |
CustomRoutingRouter.renormalize |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.router.fused_topk_bias_router (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FusedTopKBiasRouter.__init__ |
10 | 9 | 0 |
attr |
FusedTopKBiasRouter.e_score_correction_bias |
1 | 0 | 0 |
attr |
FusedTopKBiasRouter.renormalize |
1 | 0 | 0 |
attr |
FusedTopKBiasRouter.scoring_func |
1 | 0 | 0 |
attr |
FusedTopKBiasRouter.routed_scaling_factor |
1 | 0 | 0 |
func |
fused_topk_bias |
8 | 7 | 0 |
vllm.model_executor.layers.fused_moe.router.fused_topk_router (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FusedTopKRouter.__init__ |
8 | 7 | 0 |
attr |
FusedTopKRouter.renormalize |
1 | 0 | 0 |
attr |
FusedTopKRouter.scoring_func |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.router.gate_linear (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GateLinear.__init__ |
8 | 7 | 0 |
attr |
GateLinear.out_dtype |
1 | 0 | 0 |
attr |
GateLinear.allow_specialized_router_gemm |
1 | 0 | 0 |
attr |
GateLinear.allow_dsv3_router_gemm |
1 | 0 | 0 |
attr |
GateLinear.allow_cublas_router_gemm |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.router.grouped_topk_router (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
GroupedTopk.native_impl |
1 | 0 | 0 |
attr |
GroupedTopk.topk |
1 | 0 | 0 |
attr |
GroupedTopk.renormalize |
1 | 0 | 0 |
attr |
GroupedTopk.num_expert_group |
1 | 0 | 0 |
attr |
GroupedTopk.topk_group |
1 | 0 | 0 |
attr |
GroupedTopk.scoring_func |
1 | 0 | 0 |
attr |
GroupedTopk.routed_scaling_factor |
1 | 0 | 0 |
attr |
GroupedTopk.num_fused_shared_experts |
1 | 0 | 0 |
meth |
GroupedTopKRouter.__init__ |
13 | 12 | 0 |
attr |
GroupedTopKRouter.num_expert_group |
1 | 0 | 0 |
attr |
GroupedTopKRouter.topk_group |
1 | 0 | 0 |
attr |
GroupedTopKRouter.renormalize |
1 | 0 | 0 |
attr |
GroupedTopKRouter.scoring_func |
1 | 0 | 0 |
attr |
GroupedTopKRouter.routed_scaling_factor |
1 | 0 | 0 |
attr |
GroupedTopKRouter.e_score_correction_bias |
1 | 0 | 0 |
attr |
GroupedTopKRouter.num_fused_shared_experts |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.router.routing_simulator_router (7 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RoutingSimulator.register_strategy |
3 | 2 | 0 |
meth |
RoutingSimulatorRouter.__init__ |
6 | 5 | 0 |
meth |
DistributionBasedRouting.__init__ |
3 | 2 | 1 |
meth |
DistributionBasedRouting._validate_distribution_params |
1 | 0 | 0 |
attr |
DistributionBasedRouting.distribution |
1 | 0 | 0 |
attr |
DistributionBasedRouting.distribution_params |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.runner.default_moe_runner (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
DefaultMoERunner.__init__ |
10 | 9 | 0 |
meth |
DefaultMoERunner.ensure_dp_chunking_init |
1 | 0 | 0 |
meth |
DefaultMoERunner.maybe_all_reduce_tensor_model_parallel |
2 | 1 | 0 |
attr |
DefaultMoERunner.moe_config |
1 | 0 | 0 |
attr |
DefaultMoERunner.router |
1 | 0 | 0 |
attr |
DefaultMoERunner.routed_input_transform |
1 | 0 | 0 |
attr |
DefaultMoERunner.gate |
1 | 0 | 0 |
attr |
DefaultMoERunner.shared_experts |
1 | 0 | 0 |
attr |
DefaultMoERunner.quant_method |
1 | 0 | 0 |
attr |
DefaultMoERunner.reduce_results |
1 | 0 | 0 |
attr |
DefaultMoERunner.enable_dbo |
1 | 0 | 0 |
attr |
DefaultMoERunner.layer_name |
1 | 0 | 0 |
attr |
DefaultMoERunner.shared_experts_stream |
1 | 0 | 0 |
attr |
DefaultMoERunner.moe_forward |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.runner.moe_runner (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoERunner.maybe_all_reduce_tensor_model_parallel |
2 | 1 | 0 |
vllm.model_executor.layers.fused_moe.topk_weight_and_reduce (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TopKWeightAndReduceNaiveBatched.__init__ |
2 | 1 | 0 |
meth |
TopKWeightAndReduceNaiveBatched.__eq__ |
2 | 0 | 0 |
attr |
TopKWeightAndReduceNaiveBatched.rank |
1 | 0 | 0 |
meth |
TopKWeightAndReduceNoOP.__eq__ |
2 | 0 | 0 |
meth |
TopKWeightAndReduceContiguous.__eq__ |
2 | 0 | 0 |
meth |
TopKWeightAndReduceDelegate.__eq__ |
2 | 0 | 0 |
vllm.model_executor.layers.fused_moe.triton_cutlass_moe (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TritonOrCutlassExperts.__init__ |
3 | 2 | 0 |
attr |
TritonOrCutlassExperts.is_sm100 |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.triton_deep_gemm_moe (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TritonOrDeepGemmExperts.__init__ |
3 | 2 | 0 |
vllm.model_executor.layers.fused_moe.trtllm_moe (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TrtLlmGenExperts.__init__ |
4 | 2 | 0 |
meth |
TrtLlmGenExperts.apply |
16 | 15 | 0 |
attr |
TrtLlmGenExperts.device |
1 | 0 | 0 |
attr |
TrtLlmGenExperts.num_experts |
1 | 0 | 0 |
attr |
TrtLlmGenExperts.gemm1_alpha |
1 | 0 | 0 |
attr |
TrtLlmGenExperts.gemm1_beta |
1 | 0 | 0 |
attr |
TrtLlmGenExperts.gemm1_clamp_limit |
1 | 0 | 0 |
attr |
TrtLlmGenExperts.max_capture_size |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.unquantized_fused_moe_method (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UnquantizedFusedMoEMethod.__init__ |
2 | 1 | 0 |
meth |
UnquantizedFusedMoEMethod.create_weights |
7 | 5 | 0 |
attr |
UnquantizedFusedMoEMethod.unquantized_backend |
1 | 0 | 0 |
attr |
UnquantizedFusedMoEMethod.rocm_aiter_moe_enabled |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.xpu_fused_moe (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XPUExpertsFp8.__init__ |
5 | 4 | 0 |
attr |
XPUExpertsFp8.is_fp8 |
1 | 0 | 0 |
meth |
XPUExperts.__init__ |
5 | 4 | 0 |
meth |
XPUExperts.apply |
16 | 15 | 0 |
attr |
XPUExperts.is_fp8 |
1 | 0 | 0 |
vllm.model_executor.layers.fused_moe.zero_expert_fused_moe (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ZeroExpertFusedMoE.__init__ |
5 | 3 | 0 |
meth |
ZeroExpertFusedMoE._temporarily_set_attrs |
2 | 0 | 0 |
attr |
ZeroExpertFusedMoE.zero_expert_num |
1 | 0 | 0 |
attr |
ZeroExpertFusedMoE.zero_expert_type |
1 | 0 | 0 |
attr |
ZeroExpertFusedMoE.custom_routing_function |
1 | 0 | 0 |
vllm.model_executor.layers.kda (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KimiDeltaAttention.__init__ |
9 | 8 | 0 |
attr |
KimiDeltaAttention.tp_size |
1 | 0 | 0 |
attr |
KimiDeltaAttention.tp_rank |
1 | 0 | 0 |
attr |
KimiDeltaAttention.hidden_size |
1 | 0 | 0 |
attr |
KimiDeltaAttention.model_config |
1 | 0 | 0 |
attr |
KimiDeltaAttention.cache_config |
1 | 0 | 0 |
attr |
KimiDeltaAttention.head_dim |
1 | 0 | 0 |
attr |
KimiDeltaAttention.num_heads |
1 | 0 | 0 |
attr |
KimiDeltaAttention.layer_idx |
1 | 0 | 0 |
attr |
KimiDeltaAttention.prefix |
1 | 0 | 0 |
attr |
KimiDeltaAttention.local_num_heads |
1 | 0 | 0 |
attr |
KimiDeltaAttention.conv_size |
1 | 0 | 0 |
attr |
KimiDeltaAttention.q_proj |
1 | 0 | 0 |
attr |
KimiDeltaAttention.k_proj |
1 | 0 | 0 |
attr |
KimiDeltaAttention.v_proj |
1 | 0 | 0 |
attr |
KimiDeltaAttention.f_a_proj |
1 | 0 | 0 |
attr |
KimiDeltaAttention.f_b_proj |
1 | 0 | 0 |
attr |
KimiDeltaAttention.dt_bias |
1 | 0 | 0 |
attr |
KimiDeltaAttention.b_proj |
1 | 0 | 0 |
attr |
KimiDeltaAttention.q_conv1d |
1 | 0 | 0 |
attr |
KimiDeltaAttention.k_conv1d |
1 | 0 | 0 |
attr |
KimiDeltaAttention.v_conv1d |
1 | 0 | 0 |
attr |
KimiDeltaAttention.A_log |
1 | 0 | 0 |
attr |
KimiDeltaAttention.g_a_proj |
1 | 0 | 0 |
attr |
KimiDeltaAttention.g_b_proj |
1 | 0 | 0 |
attr |
KimiDeltaAttention.o_norm |
1 | 0 | 0 |
attr |
KimiDeltaAttention.o_proj |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.layernorm (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayerNorm.__init__ |
3 | 2 | 0 |
meth |
LayerNorm.forward |
2 | 1 | 0 |
attr |
LayerNorm.dim |
1 | 0 | 0 |
attr |
LayerNorm.eps |
1 | 0 | 0 |
attr |
LayerNorm.weight |
1 | 0 | 0 |
attr |
LayerNorm.bias |
1 | 0 | 0 |
meth |
RMSNormGated.__init__ |
8 | 7 | 0 |
meth |
RMSNormGated.reset_parameters |
1 | 0 | 0 |
attr |
RMSNormGated.eps |
1 | 0 | 0 |
attr |
RMSNormGated.activation |
1 | 0 | 0 |
attr |
RMSNormGated.weight |
1 | 0 | 0 |
attr |
RMSNormGated.group_size |
1 | 0 | 0 |
attr |
RMSNormGated.norm_before_gate |
1 | 0 | 0 |
attr |
RMSNorm.hidden_size |
1 | 0 | 0 |
attr |
RMSNorm.variance_epsilon |
1 | 0 | 0 |
attr |
RMSNorm.variance_size_override |
1 | 0 | 0 |
attr |
RMSNorm.has_weight |
1 | 0 | 0 |
attr |
RMSNorm.weight |
1 | 0 | 0 |
attr |
RMSNorm.rocm_norm_func |
1 | 0 | 0 |
attr |
RMSNorm.rocm_norm_func_with_add |
1 | 0 | 0 |
func |
dispatch_rocm_rmsnorm_func |
4 | 3 | 0 |
attr |
GemmaRMSNorm.weight |
1 | 0 | 0 |
attr |
GemmaRMSNorm.variance_epsilon |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.linear (69 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
LinearMethodBase.create_weights |
8 | 6 | 0 |
meth |
QKVParallelLinear.init |
13 | 12 | 0 |
meth |
QKVParallelLinear.validate_shard_id |
2 | 1 | 0 |
meth |
QKVParallelLinear._get_shard_offset_mapping |
2 | 1 | 0 |
meth |
QKVParallelLinear._get_shard_size_mapping |
2 | 1 | 0 |
meth |
QKVParallelLinear._load_fused_module_from_checkpoint |
3 | 2 | 0 |
meth |
QKVParallelLinear.weight_loader_v2 |
4 | 3 | 0 |
meth |
QKVParallelLinear.weight_loader |
4 | 3 | 0 |
attr |
QKVParallelLinear.hidden_size |
1 | 0 | 0 |
attr |
QKVParallelLinear.head_size |
1 | 0 | 0 |
attr |
QKVParallelLinear.v_head_size |
1 | 0 | 0 |
attr |
QKVParallelLinear.total_num_heads |
1 | 0 | 0 |
attr |
QKVParallelLinear.total_num_kv_heads |
1 | 0 | 0 |
attr |
QKVParallelLinear.num_heads |
1 | 0 | 0 |
attr |
QKVParallelLinear.output_sizes |
1 | 0 | 0 |
attr |
QKVParallelLinear.num_kv_heads |
1 | 0 | 0 |
attr |
QKVParallelLinear.num_kv_head_replicas |
1 | 0 | 0 |
meth |
UnquantizedLinearMethod.create_weights |
8 | 6 | 0 |
meth |
ColumnParallelLinear.init |
11 | 10 | 0 |
meth |
ColumnParallelLinear.weight_loader |
3 | 2 | 0 |
meth |
ColumnParallelLinear.weight_loader_v2 |
3 | 2 | 0 |
meth |
ColumnParallelLinear.forward |
2 | 1 | 0 |
attr |
ColumnParallelLinear.tp_rank |
1 | 0 | 0 |
attr |
ColumnParallelLinear.tp_size |
1 | 0 | 0 |
attr |
ColumnParallelLinear.input_size_per_partition |
1 | 0 | 0 |
attr |
ColumnParallelLinear.output_size_per_partition |
1 | 0 | 0 |
attr |
ColumnParallelLinear.output_partition_sizes |
1 | 0 | 0 |
attr |
ColumnParallelLinear.gather_output |
1 | 0 | 0 |
attr |
ColumnParallelLinear.bias |
1 | 0 | 0 |
meth |
LinearBase.init |
9 | 8 | 0 |
meth |
LinearBase.update_param_tp_status |
1 | 0 | 0 |
attr |
LinearBase.input_size |
1 | 0 | 0 |
attr |
LinearBase.output_size |
1 | 0 | 0 |
attr |
LinearBase.skip_bias_add |
1 | 0 | 0 |
attr |
LinearBase.params_dtype |
1 | 0 | 0 |
attr |
LinearBase.quant_config |
1 | 0 | 0 |
attr |
LinearBase.prefix |
1 | 0 | 0 |
attr |
LinearBase.allow_fp8_block_shape_mismatch |
1 | 0 | 0 |
attr |
LinearBase.return_bias |
1 | 0 | 0 |
attr |
LinearBase.disable_tp |
1 | 0 | 0 |
attr |
LinearBase.tp_rank |
1 | 0 | 0 |
attr |
LinearBase.tp_size |
1 | 0 | 0 |
meth |
RowParallelLinear.init |
12 | 11 | 0 |
meth |
RowParallelLinear.weight_loader |
3 | 2 | 0 |
meth |
RowParallelLinear.weight_loader_v2 |
3 | 2 | 0 |
meth |
RowParallelLinear.forward |
2 | 1 | 0 |
attr |
RowParallelLinear.tp_rank |
1 | 0 | 0 |
attr |
RowParallelLinear.tp_size |
1 | 0 | 0 |
attr |
RowParallelLinear.input_size_per_partition |
1 | 0 | 0 |
attr |
RowParallelLinear.output_size_per_partition |
1 | 0 | 0 |
attr |
RowParallelLinear.output_partition_sizes |
1 | 0 | 0 |
attr |
RowParallelLinear.input_is_parallel |
1 | 0 | 0 |
attr |
RowParallelLinear.reduce_results |
1 | 0 | 0 |
attr |
RowParallelLinear.bias |
1 | 0 | 0 |
meth |
ReplicatedLinear.init |
10 | 9 | 0 |
meth |
ReplicatedLinear.weight_loader |
3 | 2 | 0 |
attr |
ReplicatedLinear.output_partition_sizes |
1 | 0 | 0 |
attr |
ReplicatedLinear.bias |
1 | 0 | 0 |
meth |
MergedColumnParallelLinear.init |
11 | 10 | 0 |
meth |
MergedColumnParallelLinear.validate_shard_id |
2 | 1 | 0 |
meth |
MergedColumnParallelLinear.weight_loader |
4 | 3 | 0 |
meth |
MergedColumnParallelLinear._load_fused_module_from_checkpoint |
4 | 3 | 0 |
meth |
MergedColumnParallelLinear.weight_loader_v2 |
4 | 3 | 0 |
attr |
MergedColumnParallelLinear.output_sizes |
1 | 0 | 0 |
attr |
MergedColumnParallelLinear.tp_size |
1 | 0 | 0 |
attr |
MergedColumnParallelLinear.tp_rank |
1 | 0 | 0 |
vllm.model_executor.layers.logits_processor (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
LogitsProcessor.scale |
1 | 0 | 0 |
attr |
LogitsProcessor.vocab_size |
1 | 0 | 0 |
attr |
LogitsProcessor.logits_as_input |
1 | 0 | 0 |
attr |
LogitsProcessor.org_vocab_size |
1 | 0 | 0 |
attr |
LogitsProcessor.soft_cap |
1 | 0 | 0 |
attr |
LogitsProcessor.use_all_gather |
1 | 0 | 0 |
vllm.model_executor.layers.mamba.linear_attn (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniMaxText01LinearAttention._build_slope_tensor |
2 | 1 | 0 |
meth |
MiniMaxText01LinearAttention._prefill_and_mix_infer |
7 | 0 | 0 |
meth |
MiniMaxText01LinearAttention._decode_infer |
7 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.layer_idx |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.BLOCK |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.hidden_size |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.num_heads |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.head_dim |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.total_num_heads |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.hidden_inner_size |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.tp_size |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.tp_rank |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.tp_heads |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.qkv_size |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.tp_hidden |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.model_config |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.cache_config |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.prefix |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.qkv_proj |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.output_gate |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.out_proj |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.norm |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.tp_slope |
1 | 0 | 0 |
attr |
MiniMaxText01LinearAttention.slope_rate |
1 | 0 | 0 |
meth |
MiniMaxText01LinearKernel.jit_linear_forward_prefix |
9 | 8 | 0 |
attr |
MiniMaxText01RMSNormTP.tp_world |
1 | 0 | 0 |
attr |
MiniMaxText01RMSNormTP.tp_rank |
1 | 0 | 0 |
attr |
MiniMaxText01RMSNormTP.weight |
1 | 0 | 0 |
attr |
MiniMaxText01RMSNormTP.variance_epsilon |
1 | 0 | 0 |
vllm.model_executor.layers.mamba.mamba_mixer (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MambaMixer.init |
16 | 14 | 0 |
meth |
MambaMixer.forward |
3 | 2 | 0 |
meth |
MambaMixer.forward_impl |
3 | 2 | 0 |
attr |
MambaMixer.time_step_rank |
1 | 0 | 0 |
attr |
MambaMixer.ssm_state_size |
1 | 0 | 0 |
attr |
MambaMixer.use_rms_norm |
1 | 0 | 0 |
attr |
MambaMixer.activation |
1 | 0 | 0 |
attr |
MambaMixer.is_lora_enabled |
1 | 0 | 0 |
attr |
MambaMixer.conv_kernel_size |
1 | 0 | 0 |
attr |
MambaMixer.intermediate_size |
1 | 0 | 0 |
attr |
MambaMixer.conv1d |
1 | 0 | 0 |
attr |
MambaMixer.in_proj |
1 | 0 | 0 |
attr |
MambaMixer.x_proj |
1 | 0 | 0 |
attr |
MambaMixer.dt_proj |
1 | 0 | 0 |
attr |
MambaMixer.A |
1 | 0 | 0 |
attr |
MambaMixer.D |
1 | 0 | 0 |
attr |
MambaMixer.out_proj |
1 | 0 | 0 |
attr |
MambaMixer.dt_layernorm |
1 | 0 | 0 |
attr |
MambaMixer.b_layernorm |
1 | 0 | 0 |
attr |
MambaMixer.c_layernorm |
1 | 0 | 0 |
attr |
MambaMixer.kv_cache |
1 | 0 | 0 |
attr |
MambaMixer.model_config |
1 | 0 | 0 |
attr |
MambaMixer.cache_config |
1 | 0 | 0 |
attr |
MambaMixer.prefix |
1 | 0 | 0 |
vllm.model_executor.layers.mamba.mamba_mixer2 (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MambaMixer2.init |
17 | 16 | 0 |
meth |
MambaMixer2.forward |
3 | 2 | 0 |
meth |
MambaMixer2.conv_ssm_forward |
3 | 2 | 0 |
attr |
MambaMixer2.tp_size |
1 | 0 | 0 |
attr |
MambaMixer2.ssm_state_size |
1 | 0 | 0 |
attr |
MambaMixer2.conv_kernel_size |
1 | 0 | 0 |
attr |
MambaMixer2.activation |
1 | 0 | 0 |
attr |
MambaMixer2.intermediate_size |
1 | 0 | 0 |
attr |
MambaMixer2.head_dim |
1 | 0 | 0 |
attr |
MambaMixer2.num_heads |
1 | 0 | 0 |
attr |
MambaMixer2.n_groups |
1 | 0 | 0 |
attr |
MambaMixer2.groups_ssm_state_size |
1 | 0 | 0 |
attr |
MambaMixer2.conv_dim |
1 | 0 | 0 |
attr |
MambaMixer2.A |
1 | 0 | 0 |
attr |
MambaMixer2.D |
1 | 0 | 0 |
attr |
MambaMixer2.dt_bias |
1 | 0 | 0 |
attr |
MambaMixer2.use_rms_norm |
1 | 0 | 0 |
attr |
MambaMixer2.out_proj |
1 | 0 | 0 |
attr |
MambaMixer2.norm |
1 | 0 | 0 |
attr |
MambaMixer2.split_hidden_states_B_C_fn |
1 | 0 | 0 |
attr |
MambaMixer2.kv_cache |
1 | 0 | 0 |
attr |
MambaMixer2.model_config |
1 | 0 | 0 |
attr |
MambaMixer2.cache_config |
1 | 0 | 0 |
attr |
MambaMixer2.prefix |
1 | 0 | 0 |
attr |
MambaMixer2.num_spec |
1 | 0 | 0 |
attr |
MambaMixer2.tped_intermediate_size |
1 | 0 | 0 |
attr |
MambaMixer2.tped_conv_size |
1 | 0 | 0 |
attr |
MambaMixer2.tped_dt_size |
1 | 0 | 0 |
attr |
MambaMixer2.is_blackwell |
1 | 0 | 0 |
attr |
MambaMixer2.conv1d |
1 | 0 | 0 |
attr |
MambaMixer2.in_proj |
1 | 0 | 0 |
meth |
Mixer2RMSNormGated.init |
5 | 4 | 0 |
meth |
Mixer2RMSNormGated.forward_native |
3 | 2 | 0 |
attr |
Mixer2RMSNormGated.tp_size |
1 | 0 | 0 |
attr |
Mixer2RMSNormGated.tp_rank |
1 | 0 | 0 |
attr |
Mixer2RMSNormGated.full_hidden_size |
1 | 0 | 0 |
attr |
Mixer2RMSNormGated.group_size |
1 | 0 | 0 |
attr |
Mixer2RMSNormGated.per_rank_hidden_size |
1 | 0 | 0 |
attr |
Mixer2RMSNormGated.n_groups |
1 | 0 | 0 |
attr |
Mixer2RMSNormGated.variance_epsilon |
1 | 0 | 0 |
attr |
Mixer2RMSNormGated.use_rms_norm |
1 | 0 | 0 |
attr |
Mixer2RMSNormGated.weight |
1 | 0 | 0 |
vllm.model_executor.layers.mamba.mamba_utils (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MambaStateDtypeCalculator.kda_state_dtype |
3 | 2 | 0 |
meth |
MambaStateShapeCalculator.extra_groups_for_head_shards |
3 | 2 | 0 |
meth |
MambaStateShapeCalculator.gated_delta_net_state_shape |
8 | 7 | 0 |
meth |
MambaStateCopyFuncCalculator.linear_attention_state_copy_func |
1 | 0 | 0 |
meth |
MambaStateCopyFuncCalculator.mamba1_state_copy_func |
1 | 0 | 0 |
meth |
MambaStateCopyFuncCalculator.mamba2_state_copy_func |
1 | 0 | 0 |
meth |
MambaStateCopyFuncCalculator.short_conv_state_copy_func |
1 | 0 | 0 |
meth |
MambaStateCopyFuncCalculator.gated_delta_net_state_copy_func |
1 | 0 | 0 |
meth |
MambaStateCopyFuncCalculator.kda_state_copy_func |
1 | 0 | 0 |
vllm.model_executor.layers.mamba.ops.causal_conv1d (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
causal_conv1d_fn |
17 | 13 | 0 |
func |
causal_conv1d_update |
14 | 12 | 0 |
vllm.model_executor.layers.mamba.ops.layernorm_gated (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
rms_norm_gated |
8 | 0 | 0 |
vllm.model_executor.layers.mamba.ops.mamba_ssm (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
TRITON3 |
1 | 0 | 0 |
func |
selective_state_update |
18 | 0 | 0 |
func |
selective_scan_fn |
21 | 1 | 0 |
func |
softplus |
2 | 0 | 0 |
vllm.model_executor.layers.mamba.ops.ssd_chunk_scan (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
TRITON_22 |
1 | 0 | 0 |
vllm.model_executor.layers.mamba.ops.ssd_chunk_state (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
chunk_state_varlen |
8 | 0 | 0 |
vllm.model_executor.layers.mamba.ops.ssd_combined (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
TRITON_22 |
1 | 0 | 0 |
func |
is_int_pow_2 |
2 | 0 | 0 |
func |
mamba_chunk_scan_combined_varlen |
20 | 0 | 0 |
vllm.model_executor.layers.mamba.short_conv (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ShortConv.init |
7 | 5 | 0 |
meth |
ShortConv.forward_native |
3 | 2 | 0 |
meth |
ShortConv.forward |
3 | 2 | 0 |
meth |
ShortConv.forward_cuda |
3 | 2 | 0 |
attr |
ShortConv.config |
1 | 0 | 0 |
attr |
ShortConv.layer_idx |
1 | 0 | 0 |
attr |
ShortConv.conv_dim |
1 | 0 | 0 |
attr |
ShortConv.L_cache |
1 | 0 | 0 |
attr |
ShortConv.bias |
1 | 0 | 0 |
attr |
ShortConv.conv |
1 | 0 | 0 |
attr |
ShortConv.in_proj |
1 | 0 | 0 |
attr |
ShortConv.out_proj |
1 | 0 | 0 |
attr |
ShortConv.kv_cache |
1 | 0 | 0 |
attr |
ShortConv.model_config |
1 | 0 | 0 |
attr |
ShortConv.cache_config |
1 | 0 | 0 |
attr |
ShortConv.prefix |
1 | 0 | 0 |
vllm.model_executor.layers.mla (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MultiHeadLatentAttentionWrapper.hidden_size |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.qk_nope_head_dim |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.qk_rope_head_dim |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.qk_head_dim |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.v_head_dim |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.q_lora_rank |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.kv_lora_rank |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.num_heads |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.fused_qkv_a_proj |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.kv_a_proj_with_mqa |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.q_a_layernorm |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.q_b_proj |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.q_proj |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.kv_a_layernorm |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.kv_b_proj |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.rotary_emb |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.o_proj |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.indexer |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.indexer_rope_emb |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.is_sparse |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.mla_attn |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.prefix |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.topk_tokens |
1 | 0 | 0 |
attr |
MultiHeadLatentAttentionWrapper.topk_indices_buffer |
1 | 0 | 0 |
vllm.model_executor.layers.pooler.activations (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
resolve_classifier_act_fn |
4 | 3 | 0 |
meth |
LambdaPoolerActivation.init |
2 | 1 | 0 |
attr |
LambdaPoolerActivation.fn |
1 | 0 | 0 |
attr |
PoolerClassify.num_labels |
1 | 0 | 0 |
meth |
PoolerActivation.wraps |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.pooler.seqwise.heads (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
EmbeddingPoolerHead.projector |
1 | 0 | 0 |
attr |
EmbeddingPoolerHead.head_dtype |
1 | 0 | 0 |
attr |
EmbeddingPoolerHead.activation |
1 | 0 | 0 |
attr |
ClassifierPoolerHead.classifier |
1 | 0 | 0 |
attr |
ClassifierPoolerHead.logit_bias |
1 | 0 | 0 |
attr |
ClassifierPoolerHead.head_dtype |
1 | 0 | 0 |
attr |
ClassifierPoolerHead.activation |
1 | 0 | 0 |
vllm.model_executor.layers.pooler.seqwise.methods (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_seq_pooling_method |
2 | 1 | 0 |
vllm.model_executor.layers.pooler.seqwise.poolers (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
pooler_for_embed |
2 | 1 | 0 |
func |
pooler_for_classify |
5 | 4 | 0 |
attr |
SequencePooler.pooling |
1 | 0 | 0 |
attr |
SequencePooler.head |
1 | 0 | 0 |
vllm.model_executor.layers.pooler.special (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
BOSEOSFilter.pooler |
1 | 0 | 0 |
attr |
BOSEOSFilter.bos_token_id |
1 | 0 | 0 |
attr |
BOSEOSFilter.eos_token_id |
1 | 0 | 0 |
meth |
DispatchPooler.for_embedding |
2 | 1 | 0 |
meth |
DispatchPooler.for_seq_cls |
4 | 3 | 0 |
attr |
DispatchPooler.poolers_by_task |
1 | 0 | 0 |
vllm.model_executor.layers.pooler.tokwise.heads (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
TokenEmbeddingPoolerHead.head_dtype |
1 | 0 | 0 |
attr |
TokenEmbeddingPoolerHead.projector |
1 | 0 | 0 |
attr |
TokenEmbeddingPoolerHead.activation |
1 | 0 | 0 |
attr |
TokenClassifierPoolerHead.classifier |
1 | 0 | 0 |
attr |
TokenClassifierPoolerHead.logit_bias |
1 | 0 | 0 |
attr |
TokenClassifierPoolerHead.head_dtype |
1 | 0 | 0 |
attr |
TokenClassifierPoolerHead.activation |
1 | 0 | 0 |
vllm.model_executor.layers.pooler.tokwise.methods (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AllPool.init |
1 | 0 | 0 |
attr |
AllPool.enable_chunked_prefill |
1 | 0 | 0 |
func |
get_tok_pooling_method |
2 | 1 | 0 |
vllm.model_executor.layers.pooler.tokwise.poolers (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
pooler_for_token_classify |
5 | 4 | 0 |
attr |
TokenPooler.pooling |
1 | 0 | 0 |
attr |
TokenPooler.head |
1 | 0 | 0 |
vllm.model_executor.layers.quantization (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
register_quantization_config |
2 | 1 | 0 |
vllm.model_executor.layers.quantization.awq (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AWQConfig.apply_vllm_mapper |
2 | 1 | 0 |
meth |
AWQConfig.maybe_update_config |
3 | 2 | 0 |
attr |
AWQConfig.weight_bits |
1 | 0 | 0 |
attr |
AWQConfig.group_size |
1 | 0 | 0 |
attr |
AWQConfig.zero_point |
1 | 0 | 0 |
attr |
AWQConfig.modules_to_not_convert |
1 | 0 | 0 |
attr |
AWQConfig.pack_factor |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
AWQLinearMethod.init |
2 | 1 | 0 |
meth |
AWQLinearMethod.create_weights |
8 | 6 | 0 |
attr |
AWQLinearMethod.quant_config |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.awq_marlin (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AWQMarlinConfig.override_quantization_method |
3 | 1 | 0 |
meth |
AWQMarlinConfig.is_awq_marlin_compatible |
2 | 1 | 0 |
meth |
AWQMarlinConfig.apply_vllm_mapper |
2 | 1 | 0 |
meth |
AWQMarlinConfig.maybe_update_config |
3 | 2 | 0 |
attr |
AWQMarlinConfig.pack_factor |
1 | 0 | 0 |
attr |
AWQMarlinConfig.group_size |
1 | 0 | 0 |
attr |
AWQMarlinConfig.zero_point |
1 | 0 | 0 |
attr |
AWQMarlinConfig.lm_head_quantized |
1 | 0 | 0 |
attr |
AWQMarlinConfig.weight_bits |
1 | 0 | 0 |
attr |
AWQMarlinConfig.modules_to_not_convert |
1 | 0 | 0 |
attr |
AWQMarlinConfig.full_config |
1 | 0 | 0 |
attr |
AWQMarlinConfig.quant_type |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
AWQMarlinLinearMethod.create_weights |
8 | 7 | 0 |
attr |
AWQMarlinLinearMethod.quant_config |
1 | 0 | 0 |
attr |
AWQMarlinLinearMethod.quant_type |
1 | 0 | 0 |
attr |
AWQMarlinLinearMethod.input_dtype |
1 | 0 | 0 |
meth |
AWQMarlinMoEMethod.init |
3 | 2 | 0 |
meth |
AWQMarlinMoEMethod.create_weights |
7 | 5 | 0 |
meth |
AWQMarlinMoEMethod.select_gemm_impl |
3 | 1 | 0 |
attr |
AWQMarlinMoEMethod.quant_config |
1 | 0 | 0 |
attr |
AWQMarlinMoEMethod.quant_type |
1 | 0 | 0 |
attr |
AWQMarlinMoEMethod.input_dtype |
1 | 0 | 0 |
attr |
AWQMarlinMoEMethod.use_marlin |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.awq_triton (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
awq_gemm_kernel |
14 | 4 | 0 |
func |
awq_dequantize_kernel |
10 | 2 | 0 |
vllm.model_executor.layers.quantization.base_config (12 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QuantizationConfig.init |
1 | 0 | 0 |
meth |
QuantizationConfig.override_quantization_method |
3 | 1 | 0 |
meth |
QuantizationConfig.get_from_keys |
3 | 3 | 1 |
meth |
QuantizationConfig.get_from_keys_or |
4 | 4 | 2 |
meth |
QuantizationConfig.apply_vllm_mapper |
2 | 1 | 0 |
meth |
QuantizationConfig.maybe_update_config |
2 | 1 | 0 |
meth |
QuantizeMethodBase.create_weights |
4 | 1 | 0 |
meth |
QuantizeMethodBase.apply |
4 | 2 | 0 |
meth |
QuantizeMethodBase.embedding |
4 | 2 | 0 |
vllm.model_executor.layers.quantization.bitsandbytes (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BitsAndBytesMoEMethod.init |
3 | 2 | 0 |
meth |
BitsAndBytesMoEMethod.create_weights |
7 | 5 | 0 |
meth |
BitsAndBytesMoEMethod._create_weights_4bit |
7 | 5 | 0 |
meth |
BitsAndBytesMoEMethod._create_weights_8bit |
7 | 5 | 0 |
attr |
BitsAndBytesMoEMethod.quant_config |
1 | 0 | 0 |
func |
is_layer_skipped_bnb |
3 | 2 | 0 |
attr |
BitsAndBytesConfig.load_in_8bit |
1 | 0 | 0 |
attr |
BitsAndBytesConfig.load_in_4bit |
1 | 0 | 0 |
attr |
BitsAndBytesConfig.bnb_4bit_compute_dtype |
1 | 0 | 0 |
attr |
BitsAndBytesConfig.bnb_4bit_quant_storage |
1 | 0 | 0 |
attr |
BitsAndBytesConfig.bnb_4bit_quant_type |
1 | 0 | 0 |
attr |
BitsAndBytesConfig.bnb_4bit_use_double_quant |
1 | 0 | 0 |
attr |
BitsAndBytesConfig.llm_int8_enable_fp32_cpu_offload |
1 | 0 | 0 |
attr |
BitsAndBytesConfig.llm_int8_has_fp16_weight |
1 | 0 | 0 |
attr |
BitsAndBytesConfig.llm_int8_skip_modules |
1 | 0 | 0 |
attr |
BitsAndBytesConfig.llm_int8_threshold |
1 | 0 | 0 |
meth |
BitsAndBytesLinearMethod.init |
2 | 1 | 0 |
meth |
BitsAndBytesLinearMethod.create_weights |
8 | 6 | 0 |
attr |
BitsAndBytesLinearMethod.quant_config |
1 | 0 | 0 |
func |
calculate_quant_ratio |
2 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.compressed_tensors (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompressedTensorsLinearMethod.init |
2 | 1 | 0 |
meth |
CompressedTensorsLinearMethod.create_weights |
8 | 6 | 0 |
meth |
CompressedTensorsLinearMethod.apply |
4 | 3 | 0 |
attr |
CompressedTensorsLinearMethod.quantization_config |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.compressed_tensors_moe (50 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompressedTensorsWNA16MarlinMoEMethod.init |
5 | 4 | 0 |
meth |
CompressedTensorsWNA16MarlinMoEMethod.create_weights |
7 | 5 | 0 |
attr |
CompressedTensorsWNA16MarlinMoEMethod.weight_quant |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MarlinMoEMethod.input_quant |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MarlinMoEMethod.num_bits |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MarlinMoEMethod.packed_factor |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MarlinMoEMethod.strategy |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MarlinMoEMethod.group_size |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MarlinMoEMethod.actorder |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MarlinMoEMethod.quant_type |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MarlinMoEMethod.marlin_input_dtype |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MarlinMoEMethod.use_flashinfer_mxint4_moe |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MarlinMoEMethod.kernel_backend |
1 | 0 | 0 |
meth |
CompressedTensorsW4A8Int8MoEMethod.init |
5 | 4 | 0 |
meth |
CompressedTensorsW4A8Int8MoEMethod.create_weights |
7 | 5 | 0 |
attr |
CompressedTensorsW4A8Int8MoEMethod.has_bias |
1 | 0 | 0 |
attr |
CompressedTensorsW4A8Int8MoEMethod.weight_quant |
1 | 0 | 0 |
attr |
CompressedTensorsW4A8Int8MoEMethod.input_quant |
1 | 0 | 0 |
attr |
CompressedTensorsW4A8Int8MoEMethod.group_size |
1 | 0 | 0 |
attr |
CompressedTensorsW4A8Int8MoEMethod.static_input_scales |
1 | 0 | 0 |
meth |
CompressedTensorsW8A8Int8MoEMethod.init |
5 | 4 | 0 |
meth |
CompressedTensorsW8A8Int8MoEMethod.create_weights |
7 | 5 | 0 |
attr |
CompressedTensorsW8A8Int8MoEMethod.weight_quant |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Int8MoEMethod.input_quant |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Int8MoEMethod.static_input_scales |
1 | 0 | 0 |
meth |
CompressedTensorsW4A4Nvfp4MoEMethod.init |
4 | 3 | 0 |
meth |
CompressedTensorsW4A4Nvfp4MoEMethod.create_weights |
7 | 5 | 0 |
attr |
CompressedTensorsW4A4Nvfp4MoEMethod.group_size |
1 | 0 | 0 |
attr |
CompressedTensorsW4A4Nvfp4MoEMethod.use_global_sf |
1 | 0 | 0 |
meth |
CompressedTensorsWNA16MoEMethod.init |
5 | 4 | 0 |
meth |
CompressedTensorsWNA16MoEMethod.create_weights |
7 | 5 | 0 |
attr |
CompressedTensorsWNA16MoEMethod.weight_quant |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MoEMethod.input_quant |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MoEMethod.num_bits |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MoEMethod.packed_factor |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MoEMethod.strategy |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16MoEMethod.group_size |
1 | 0 | 0 |
meth |
CompressedTensorsW8A8Fp8MoEMethod.init |
5 | 4 | 0 |
meth |
CompressedTensorsW8A8Fp8MoEMethod.create_weights |
7 | 5 | 0 |
attr |
CompressedTensorsW8A8Fp8MoEMethod.weight_quant |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Fp8MoEMethod.input_quant |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Fp8MoEMethod.block_quant |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Fp8MoEMethod.static_input_scales |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Fp8MoEMethod.weight_block_size |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.schemes.compressed_tensors_24 (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompressedTensors24.init |
5 | 4 | 0 |
meth |
CompressedTensors24.create_weights |
8 | 6 | 0 |
attr |
CompressedTensors24.quantized |
1 | 0 | 0 |
attr |
CompressedTensors24.weight_quant |
1 | 0 | 0 |
attr |
CompressedTensors24.input_quant |
1 | 0 | 0 |
attr |
CompressedTensors24.do_sparse_decompress |
1 | 0 | 0 |
attr |
CompressedTensors24.model_compressor |
1 | 0 | 0 |
attr |
CompressedTensors24.quant_fp8 |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.schemes.compressed_tensors_scheme (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompressedTensorsScheme.create_weights |
3 | 0 | 0 |
meth |
CompressedTensorsScheme.apply_weights |
4 | 3 | 0 |
meth |
CompressedTensorsScheme.process_weights_after_loading |
2 | 1 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.schemes.compressed_tensors_w4a16_mxfp4 (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompressedTensorsW4A16Mxfp4.init |
1 | 0 | 0 |
meth |
CompressedTensorsW4A16Mxfp4.create_weights |
7 | 5 | 0 |
attr |
CompressedTensorsW4A16Mxfp4.group_size |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.schemes.compressed_tensors_w4a16_nvfp4 (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompressedTensorsW4A16Fp4.init |
1 | 0 | 0 |
meth |
CompressedTensorsW4A16Fp4.create_weights |
7 | 5 | 0 |
attr |
CompressedTensorsW4A16Fp4.group_size |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.schemes.compressed_tensors_w4a4_nvfp4 (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompressedTensorsW4A4Fp4.init |
1 | 0 | 0 |
meth |
CompressedTensorsW4A4Fp4.create_weights |
7 | 5 | 0 |
attr |
CompressedTensorsW4A4Fp4.backend |
1 | 0 | 0 |
attr |
CompressedTensorsW4A4Fp4.group_size |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.schemes.compressed_tensors_w4a8_fp8 (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompressedTensorsW4A8Fp8.init |
6 | 5 | 0 |
meth |
CompressedTensorsW4A8Fp8.create_weights |
9 | 7 | 0 |
attr |
CompressedTensorsW4A8Fp8.pack_factor |
1 | 0 | 0 |
attr |
CompressedTensorsW4A8Fp8.strategy |
1 | 0 | 0 |
attr |
CompressedTensorsW4A8Fp8.symmetric |
1 | 0 | 0 |
attr |
CompressedTensorsW4A8Fp8.group_size |
1 | 0 | 0 |
attr |
CompressedTensorsW4A8Fp8.has_g_idx |
1 | 0 | 0 |
attr |
CompressedTensorsW4A8Fp8.quant_type |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.schemes.compressed_tensors_w4a8_int (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompressedTensorsW4A8Int.init |
6 | 5 | 0 |
meth |
CompressedTensorsW4A8Int.create_weights |
9 | 7 | 0 |
attr |
CompressedTensorsW4A8Int.strategy |
1 | 0 | 0 |
attr |
CompressedTensorsW4A8Int.group_size |
1 | 0 | 0 |
attr |
CompressedTensorsW4A8Int.is_static_input_scheme |
1 | 0 | 0 |
attr |
CompressedTensorsW4A8Int.input_symmetric |
1 | 0 | 0 |
attr |
CompressedTensorsW4A8Int.quant_type |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.schemes.compressed_tensors_w8a16_fp8 (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompressedTensorsW8A16Fp8.init |
3 | 2 | 0 |
meth |
CompressedTensorsW8A16Fp8.create_weights |
9 | 7 | 0 |
attr |
CompressedTensorsW8A16Fp8.weight_quant |
1 | 0 | 0 |
attr |
CompressedTensorsW8A16Fp8.strategy |
1 | 0 | 0 |
attr |
CompressedTensorsW8A16Fp8.is_static_input_scheme |
1 | 0 | 0 |
attr |
CompressedTensorsW8A16Fp8.weight_block_size |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.schemes.compressed_tensors_w8a8_fp8 (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompressedTensorsW8A8Fp8.init |
3 | 2 | 0 |
meth |
CompressedTensorsW8A8Fp8.create_weights |
9 | 7 | 0 |
meth |
CompressedTensorsW8A8Fp8.process_weights_after_loading |
2 | 1 | 0 |
attr |
CompressedTensorsW8A8Fp8.weight_quant |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Fp8.strategy |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Fp8.out_dtype |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Fp8.is_static_input_scheme |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Fp8.weight_block_size |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Fp8.cutlass_block_fp8_supported |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Fp8.use_aiter_and_is_supported |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Fp8.act_q_group_shape |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Fp8.w8a8_block_fp8_linear |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Fp8.fp8_linear |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.schemes.compressed_tensors_w8a8_int8 (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompressedTensorsW8A8Int8.init |
4 | 3 | 0 |
meth |
CompressedTensorsW8A8Int8.create_weights |
7 | 5 | 0 |
attr |
CompressedTensorsW8A8Int8.strategy |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Int8.is_static_input_scheme |
1 | 0 | 0 |
attr |
CompressedTensorsW8A8Int8.input_symmetric |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.schemes.compressed_tensors_wNa16 (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
WNA16_SUPPORTED_BITS |
1 | 0 | 0 |
meth |
CompressedTensorsWNA16.init |
7 | 6 | 0 |
meth |
CompressedTensorsWNA16.create_weights |
9 | 7 | 0 |
attr |
CompressedTensorsWNA16.pack_factor |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16.strategy |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16.symmetric |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16.group_size |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16.has_g_idx |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16.layer_name |
1 | 0 | 0 |
attr |
CompressedTensorsWNA16.quant_type |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.transform.linear (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CompressedTensorsLinearTransformMethod.init |
4 | 3 | 0 |
meth |
CompressedTensorsLinearTransformMethod.create_weights |
8 | 6 | 0 |
meth |
CompressedTensorsLinearTransformMethod.process_weights_after_loading |
2 | 0 | 0 |
meth |
CompressedTensorsLinearTransformMethod._validate_tfm_schemes |
2 | 1 | 0 |
attr |
CompressedTensorsLinearTransformMethod.quant_method |
1 | 0 | 0 |
attr |
CompressedTensorsLinearTransformMethod.input_tfms |
1 | 0 | 0 |
attr |
CompressedTensorsLinearTransformMethod.output_tfms |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.transform.module (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HadamardTransform.init |
6 | 5 | 0 |
meth |
HadamardTransform.process_weights_after_loading |
1 | 0 | 0 |
meth |
HadamardTransform._validate_input_transforms |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.transform.schemes.linear_qutlass_nvfp4 (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QutlassNvFP4LinearMethod.create_weights |
8 | 0 | 0 |
vllm.model_executor.layers.quantization.compressed_tensors.triton_scaled_mm (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
triton_scaled_mm |
11 | 10 | 0 |
func |
scaled_mm_kernel |
22 | 6 | 0 |
func |
is_weak_contiguous |
2 | 1 | 0 |
vllm.model_executor.layers.quantization.cpu_wna16 (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CPUAWQConfig.override_quantization_method |
3 | 1 | 0 |
meth |
CPUAWQConfig.apply_vllm_mapper |
2 | 1 | 0 |
meth |
CPUAWQConfig.maybe_update_config |
3 | 2 | 0 |
attr |
CPUAWQConfig.pack_factor |
1 | 0 | 0 |
attr |
CPUAWQConfig.group_size |
1 | 0 | 0 |
attr |
CPUAWQConfig.zero_point |
1 | 0 | 0 |
attr |
CPUAWQConfig.lm_head_quantized |
1 | 0 | 0 |
attr |
CPUAWQConfig.weight_bits |
1 | 0 | 0 |
attr |
CPUAWQConfig.modules_to_not_convert |
1 | 0 | 0 |
attr |
CPUAWQConfig.full_config |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
CPUAWQLinearMethod.create_weights |
8 | 7 | 0 |
attr |
CPUAWQLinearMethod.quant_config |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.experts_int8 (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ExpertsInt8MoEMethod.init |
3 | 2 | 0 |
meth |
ExpertsInt8MoEMethod.create_weights |
7 | 5 | 0 |
meth |
ExpertsInt8MoEMethod.quantizing_weight_loader |
3 | 0 | 0 |
attr |
ExpertsInt8MoEMethod.quant_config |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.fbgemm_fp8 (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FBGEMMFp8LinearMethod.init |
2 | 1 | 0 |
meth |
FBGEMMFp8LinearMethod.create_weights |
8 | 6 | 0 |
attr |
FBGEMMFp8LinearMethod.quant_config |
1 | 0 | 0 |
attr |
FBGEMMFp8LinearMethod.out_dtype |
1 | 0 | 0 |
attr |
FBGEMMFp8LinearMethod.fp8_linear |
1 | 0 | 0 |
meth |
FBGEMMFp8Config.init |
3 | 2 | 0 |
attr |
FBGEMMFp8Config.ignore_list |
1 | 0 | 0 |
attr |
FBGEMMFp8Config.input_scale_ub |
1 | 0 | 0 |
attr |
FBGEMMFp8Config.use_marlin |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.fp8 (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Fp8MoEMethod.init |
3 | 2 | 0 |
meth |
Fp8MoEMethod.create_weights |
7 | 5 | 0 |
attr |
Fp8MoEMethod.quant_config |
1 | 0 | 0 |
attr |
Fp8MoEMethod.weight_block_size |
1 | 0 | 0 |
attr |
Fp8MoEMethod.weight_scale_name |
1 | 0 | 0 |
meth |
Fp8Config.apply_vllm_mapper |
2 | 1 | 0 |
attr |
Fp8Config.is_checkpoint_fp8_serialized |
1 | 0 | 0 |
attr |
Fp8Config.activation_scheme |
1 | 0 | 0 |
attr |
Fp8Config.ignored_layers |
1 | 0 | 0 |
attr |
Fp8Config.weight_block_size |
1 | 0 | 0 |
meth |
Fp8OnlineLinearMethod.create_weights |
8 | 6 | 0 |
meth |
CopyNumelCounter.init |
1 | 0 | 0 |
meth |
CopyNumelCounter.torch_dispatch |
5 | 0 | 0 |
attr |
CopyNumelCounter.copied_numel |
1 | 0 | 0 |
meth |
Fp8LinearMethod.init |
2 | 1 | 0 |
meth |
Fp8LinearMethod.create_weights |
8 | 6 | 0 |
attr |
Fp8LinearMethod.quant_config |
1 | 0 | 0 |
attr |
Fp8LinearMethod.cutlass_block_fp8_supported |
1 | 0 | 0 |
attr |
Fp8LinearMethod.out_dtype |
1 | 0 | 0 |
attr |
Fp8LinearMethod.marlin_input_dtype |
1 | 0 | 0 |
attr |
Fp8LinearMethod.use_marlin |
1 | 0 | 0 |
attr |
Fp8LinearMethod.use_aiter_and_is_supported |
1 | 0 | 0 |
attr |
Fp8LinearMethod.use_deep_gemm |
1 | 0 | 0 |
attr |
Fp8LinearMethod.weight_block_size |
1 | 0 | 0 |
attr |
Fp8LinearMethod.block_quant |
1 | 0 | 0 |
attr |
Fp8LinearMethod.act_q_static |
1 | 0 | 0 |
attr |
Fp8LinearMethod.w8a8_block_fp8_linear |
1 | 0 | 0 |
attr |
Fp8LinearMethod.fp8_linear |
1 | 0 | 0 |
meth |
Fp8OnlineMoEMethod.init |
3 | 2 | 0 |
meth |
Fp8OnlineMoEMethod.create_weights |
7 | 5 | 0 |
meth |
Fp8KVCacheMethod.init |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.fp_quant (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
FPQuantConfig.hadamard_group_size |
1 | 0 | 0 |
attr |
FPQuantConfig.forward_dtype |
1 | 0 | 0 |
attr |
FPQuantConfig.forward_method |
1 | 0 | 0 |
attr |
FPQuantConfig.pseudoquantization |
1 | 0 | 0 |
attr |
FPQuantConfig.modules_to_not_convert |
1 | 0 | 0 |
func |
fused_quantize_mx_fake |
4 | 0 | 0 |
func |
matmul_mxf4_bf16_fake |
6 | 0 | 0 |
func |
matmul_nvf4_bf16_fake |
6 | 0 | 0 |
meth |
FPQuantLinearMethod.init |
2 | 1 | 0 |
meth |
FPQuantLinearMethod.create_weights |
8 | 6 | 0 |
attr |
FPQuantLinearMethod.quant_config |
1 | 0 | 0 |
func |
fused_quantize_nv_fake |
4 | 0 | 0 |
vllm.model_executor.layers.quantization.gguf (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
is_layer_skipped_gguf |
4 | 3 | 0 |
meth |
GGUFMoEMethod.init |
3 | 2 | 0 |
meth |
GGUFMoEMethod.create_weights |
7 | 5 | 0 |
attr |
GGUFMoEMethod.quant_config |
1 | 0 | 0 |
meth |
GGUFConfig.apply_vllm_mapper |
2 | 1 | 0 |
attr |
GGUFConfig.unquantized_modules |
1 | 0 | 0 |
meth |
GGUFLinearMethod.init |
2 | 1 | 0 |
meth |
GGUFLinearMethod.create_weights |
8 | 6 | 0 |
meth |
GGUFLinearMethod.process_weights_after_loading |
2 | 1 | 0 |
meth |
GGUFLinearMethod._create_padded_weight_param |
2 | 1 | 0 |
attr |
GGUFLinearMethod.quant_config |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.gptq (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTQLinearMethod.init |
2 | 1 | 0 |
meth |
GPTQLinearMethod.create_weights |
8 | 6 | 0 |
attr |
GPTQLinearMethod.quant_config |
1 | 0 | 0 |
attr |
GPTQLinearMethod.use_v2_format |
1 | 0 | 0 |
meth |
GPTQConfig.apply_vllm_mapper |
2 | 1 | 0 |
meth |
GPTQConfig.maybe_update_config |
3 | 2 | 0 |
attr |
GPTQConfig.dynamic |
1 | 0 | 0 |
attr |
GPTQConfig.weight_bits |
1 | 0 | 0 |
attr |
GPTQConfig.group_size |
1 | 0 | 0 |
attr |
GPTQConfig.desc_act |
1 | 0 | 0 |
attr |
GPTQConfig.lm_head_quantized |
1 | 0 | 0 |
attr |
GPTQConfig.pack_factor |
1 | 0 | 0 |
attr |
GPTQConfig.modules_in_block_to_quantize |
1 | 0 | 0 |
attr |
GPTQConfig.autoround_version |
1 | 0 | 0 |
attr |
GPTQConfig.checkpoint_format |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.gptq_marlin (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTQMarlinMoEMethod.create_weights |
7 | 5 | 0 |
meth |
GPTQMarlinMoEMethod.select_gemm_impl |
3 | 1 | 0 |
attr |
GPTQMarlinMoEMethod.quant_config |
1 | 0 | 0 |
attr |
GPTQMarlinMoEMethod.input_dtype |
1 | 0 | 0 |
attr |
GPTQMarlinMoEMethod.use_marlin |
1 | 0 | 0 |
attr |
GPTQMarlinMoEMethod.quant_type |
1 | 0 | 0 |
meth |
GPTQMarlinLinearMethod.create_weights |
8 | 7 | 0 |
attr |
GPTQMarlinLinearMethod.quant_config |
1 | 0 | 0 |
attr |
GPTQMarlinLinearMethod.input_dtype |
1 | 0 | 0 |
attr |
GPTQMarlinLinearMethod.quant_type |
1 | 0 | 0 |
meth |
GPTQMarlinConfig.override_quantization_method |
3 | 1 | 0 |
meth |
GPTQMarlinConfig.is_gptq_marlin_compatible |
2 | 1 | 0 |
meth |
GPTQMarlinConfig.apply_vllm_mapper |
2 | 0 | 0 |
meth |
GPTQMarlinConfig.maybe_update_config |
3 | 2 | 0 |
attr |
GPTQMarlinConfig.dynamic |
1 | 0 | 0 |
attr |
GPTQMarlinConfig.weight_bits |
1 | 0 | 0 |
attr |
GPTQMarlinConfig.is_sym |
1 | 0 | 0 |
attr |
GPTQMarlinConfig.pack_factor |
1 | 0 | 0 |
attr |
GPTQMarlinConfig.group_size |
1 | 0 | 0 |
attr |
GPTQMarlinConfig.desc_act |
1 | 0 | 0 |
attr |
GPTQMarlinConfig.lm_head_quantized |
1 | 0 | 0 |
attr |
GPTQMarlinConfig.full_config |
1 | 0 | 0 |
attr |
GPTQMarlinConfig.quant_type |
1 | 0 | 0 |
attr |
GPTQMarlinConfig.modules_in_block_to_quantize |
1 | 0 | 0 |
attr |
GPTQMarlinConfig.autoround_version |
1 | 0 | 0 |
func |
get_moe_quant_method |
5 | 4 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.inc (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
INCConfig.get_layer_config |
3 | 1 | 0 |
meth |
INCConfig.apply_vllm_mapper |
2 | 1 | 0 |
meth |
INCConfig.apply_awq_quant_layer |
4 | 2 | 0 |
meth |
INCConfig.apply_gptq_quant_layer |
4 | 2 | 0 |
meth |
INCConfig.apply_ipex_quant_layer |
3 | 1 | 0 |
meth |
INCConfig.get_quant_method |
3 | 2 | 0 |
meth |
INCConfig.override_quantization_method |
3 | 1 | 0 |
attr |
INCConfig.weight_bits |
1 | 0 | 0 |
attr |
INCConfig.group_size |
1 | 0 | 0 |
attr |
INCConfig.sym |
1 | 0 | 0 |
attr |
INCConfig.packing_format |
1 | 0 | 0 |
attr |
INCConfig.block_name_to_quantize |
1 | 0 | 0 |
attr |
INCConfig.extra_config |
1 | 0 | 0 |
attr |
INCConfig.data_type |
1 | 0 | 0 |
attr |
INCConfig.backend |
1 | 0 | 0 |
attr |
INCConfig.pack_factor |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.input_quant_fp8 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QuantFP8.init |
8 | 7 | 0 |
meth |
QuantFP8.forward_native |
5 | 4 | 0 |
attr |
QuantFP8.static |
1 | 0 | 0 |
attr |
QuantFP8.group_shape |
1 | 0 | 0 |
attr |
QuantFP8.use_per_token_if_dynamic |
1 | 0 | 0 |
attr |
QuantFP8.num_token_padding |
1 | 0 | 0 |
attr |
QuantFP8.column_major_scales |
1 | 0 | 0 |
attr |
QuantFP8.tma_aligned_scales |
1 | 0 | 0 |
attr |
QuantFP8.use_ue8m0 |
1 | 0 | 0 |
attr |
QuantFP8.use_deep_gemm_supported |
1 | 0 | 0 |
attr |
QuantFP8.use_aiter |
1 | 0 | 0 |
attr |
QuantFP8.is_group_quant |
1 | 0 | 0 |
attr |
QuantFP8.group_size |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.kv_cache (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
BaseKVCacheMethod.init |
2 | 1 | 0 |
meth |
BaseKVCacheMethod.create_weights |
2 | 1 | 0 |
attr |
BaseKVCacheMethod.quant_config |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.modelopt (55 missing, 4 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
ModelOptFp8PcPtLinearMethod.create_weights |
8 | 6 | 0 |
attr |
ModelOptFp8PcPtLinearMethod.quant_config |
1 | 0 | 0 |
attr |
ModelOptFp8PcPtLinearMethod.fp8_linear |
1 | 0 | 0 |
meth |
ModelOptMxFp8LinearMethod.create_weights |
8 | 6 | 0 |
attr |
ModelOptMxFp8LinearMethod.quant_config |
1 | 0 | 0 |
attr |
ModelOptMxFp8LinearMethod.mxfp8_linear_op |
1 | 0 | 0 |
meth |
ModelOptMixedPrecisionConfig.override_quantization_method |
3 | 1 | 0 |
meth |
ModelOptMixedPrecisionConfig._from_config |
7 | 7 | 1 |
meth |
ModelOptMixedPrecisionConfig.apply_vllm_mapper |
2 | 1 | 0 |
attr |
ModelOptMixedPrecisionConfig.kv_cache_quant_method |
1 | 0 | 0 |
attr |
ModelOptMixedPrecisionConfig.quantized_layers |
1 | 0 | 0 |
attr |
ModelOptMixedPrecisionConfig.fp8_config |
1 | 0 | 0 |
attr |
ModelOptMixedPrecisionConfig.nvfp4_config |
1 | 0 | 0 |
meth |
ModelOptQuantConfigBase.init |
2 | 1 | 0 |
meth |
ModelOptQuantConfigBase.apply_vllm_mapper |
2 | 1 | 0 |
meth |
ModelOptFp8PbWoLinearMethod.create_weights |
8 | 6 | 0 |
attr |
ModelOptFp8PbWoLinearMethod.quant_config |
1 | 0 | 0 |
attr |
ModelOptFp8PbWoLinearMethod.weight_block_size |
1 | 0 | 0 |
attr |
ModelOptFp8PbWoLinearMethod.w8a8_block_fp8_linear |
1 | 0 | 0 |
meth |
ModelOptFp8MoEMethod.create_weights |
7 | 5 | 0 |
meth |
ModelOptFp8MoEMethod._setup_kernel |
8 | 7 | 0 |
attr |
ModelOptFp8MoEMethod.quant_config |
1 | 0 | 0 |
meth |
ModelOptNvFp4Config.override_quantization_method |
3 | 1 | 0 |
meth |
ModelOptNvFp4Config._from_config |
7 | 7 | 1 |
attr |
ModelOptNvFp4Config.is_checkpoint_nvfp4_serialized |
1 | 0 | 0 |
attr |
ModelOptNvFp4Config.group_size |
1 | 0 | 0 |
attr |
ModelOptNvFp4Config.kv_cache_quant_algo |
1 | 0 | 0 |
meth |
ModelOptNvFp4LinearMethod.create_weights |
8 | 6 | 0 |
attr |
ModelOptNvFp4LinearMethod.quant_config |
1 | 0 | 0 |
attr |
ModelOptNvFp4LinearMethod.marlin_input_dtype |
1 | 0 | 0 |
attr |
ModelOptNvFp4LinearMethod.backend |
1 | 0 | 0 |
meth |
ModelOptMxFp8Config.override_quantization_method |
3 | 1 | 0 |
meth |
ModelOptMxFp8Config._from_config |
6 | 6 | 1 |
attr |
ModelOptMxFp8Config.is_checkpoint_mxfp8_serialized |
1 | 0 | 0 |
attr |
ModelOptMxFp8Config.kv_cache_quant_algo |
1 | 0 | 0 |
meth |
ModelOptFp8KVCacheMethod.init |
2 | 1 | 0 |
meth |
ModelOptFp8Config.override_quantization_method |
3 | 1 | 0 |
meth |
ModelOptFp8Config._from_config |
6 | 6 | 1 |
attr |
ModelOptFp8Config.quant_method |
1 | 0 | 0 |
attr |
ModelOptFp8Config.is_checkpoint_fp8_serialized |
1 | 0 | 0 |
attr |
ModelOptFp8Config.kv_cache_quant_method |
1 | 0 | 0 |
meth |
ModelOptFp8LinearMethod.create_weights |
8 | 6 | 0 |
attr |
ModelOptFp8LinearMethod.quant_config |
1 | 0 | 0 |
attr |
ModelOptFp8LinearMethod.fp8_linear |
1 | 0 | 0 |
meth |
ModelOptNvFp4FusedMoE.create_weights |
7 | 5 | 0 |
attr |
ModelOptNvFp4FusedMoE.quant_config |
1 | 0 | 0 |
attr |
ModelOptNvFp4FusedMoE.use_global_sf |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.moe_wna16 (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoeWNA16Config.override_quantization_method |
3 | 1 | 0 |
meth |
MoeWNA16Config.is_moe_wna16_compatible |
2 | 1 | 0 |
attr |
MoeWNA16Config.weight_bits |
1 | 0 | 0 |
attr |
MoeWNA16Config.group_size |
1 | 0 | 0 |
attr |
MoeWNA16Config.has_zp |
1 | 0 | 0 |
attr |
MoeWNA16Config.bit8_pack_factor |
1 | 0 | 0 |
attr |
MoeWNA16Config.lm_head_quantized |
1 | 0 | 0 |
attr |
MoeWNA16Config.linear_quant_method |
1 | 0 | 0 |
attr |
MoeWNA16Config.full_config |
1 | 0 | 0 |
attr |
MoeWNA16Config.use_marlin |
1 | 0 | 0 |
attr |
MoeWNA16Config.modules_to_not_convert |
1 | 0 | 0 |
func |
is_layer_skipped_quant |
3 | 2 | 0 |
meth |
MoeWNA16Method.create_weights |
7 | 5 | 0 |
meth |
MoeWNA16Method.get_weight_loader |
3 | 0 | 0 |
attr |
MoeWNA16Method.quant_config |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.mxfp4 (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XpuMxfp4MoEMethod.init |
2 | 1 | 0 |
meth |
XpuMxfp4MoEMethod.create_weights |
7 | 5 | 0 |
attr |
XpuMxfp4MoEMethod.moe_config |
1 | 0 | 0 |
meth |
Mxfp4Config.init |
2 | 1 | 0 |
meth |
Mxfp4Config.from_config |
2 | 0 | 0 |
attr |
Mxfp4Config.ignored_layers |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
Mxfp4MoEMethod.init |
2 | 1 | 0 |
meth |
Mxfp4MoEMethod.create_weights |
7 | 5 | 0 |
meth |
Mxfp4MoEMethod.process_weights_after_loading |
2 | 0 | 0 |
attr |
Mxfp4MoEMethod.weight_dtype |
1 | 0 | 0 |
attr |
Mxfp4MoEMethod.mxfp4_backend |
1 | 0 | 0 |
attr |
Mxfp4MoEMethod.max_capture_size |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.petit (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PetitNvFp4LinearMethod.init |
2 | 1 | 0 |
meth |
PetitNvFp4LinearMethod.create_weights |
8 | 6 | 0 |
attr |
PetitNvFp4LinearMethod.quant_config |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
PetitFp8KVCacheMethod.init |
2 | 1 | 0 |
meth |
PetitNvFp4Config.override_quantization_method |
3 | 1 | 0 |
attr |
PetitNvFp4Config.is_checkpoint_nvfp4_serialized |
1 | 0 | 0 |
attr |
PetitNvFp4Config.group_size |
1 | 0 | 0 |
attr |
PetitNvFp4Config.kv_cache_quant_algo |
1 | 0 | 0 |
attr |
PetitNvFp4Config.exclude_modules |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.ptpc_fp8 (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PTPCFp8LinearMethod.init |
2 | 1 | 0 |
attr |
PTPCFp8LinearMethod.fp8_linear |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.quark.quark (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QuarkLinearMethod.init |
2 | 1 | 0 |
meth |
QuarkLinearMethod.create_weights |
8 | 6 | 0 |
meth |
QuarkLinearMethod.apply |
4 | 3 | 0 |
attr |
QuarkLinearMethod.quantization_config |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.quark.quark_moe (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QuarkMoEMethod.init |
2 | 1 | 0 |
attr |
QuarkMoEMethod.has_bias |
1 | 0 | 0 |
meth |
QuarkOCP_MX_MoEMethod_OSS.init |
4 | 3 | 0 |
meth |
QuarkOCP_MX_MoEMethod_OSS.process_weights_after_loading |
2 | 0 | 0 |
meth |
QuarkOCP_MX_MoEMethod.init |
4 | 3 | 0 |
meth |
QuarkOCP_MX_MoEMethod.get_packed_dim |
3 | 2 | 0 |
meth |
QuarkOCP_MX_MoEMethod.create_weights |
7 | 5 | 0 |
meth |
QuarkOCP_MX_MoEMethod.process_weights_after_loading |
2 | 0 | 0 |
attr |
QuarkOCP_MX_MoEMethod.weight_quant |
1 | 0 | 0 |
attr |
QuarkOCP_MX_MoEMethod.input_quant |
1 | 0 | 0 |
attr |
QuarkOCP_MX_MoEMethod.weight_dtype |
1 | 0 | 0 |
attr |
QuarkOCP_MX_MoEMethod.fp4_dtype |
1 | 0 | 0 |
attr |
QuarkOCP_MX_MoEMethod.ocp_mx_scheme |
1 | 0 | 0 |
attr |
QuarkOCP_MX_MoEMethod.use_rocm_aiter_moe |
1 | 0 | 0 |
attr |
QuarkOCP_MX_MoEMethod.model_type |
1 | 0 | 0 |
attr |
QuarkOCP_MX_MoEMethod.emulate |
1 | 0 | 0 |
attr |
QuarkOCP_MX_MoEMethod.static_input_scales |
1 | 0 | 0 |
attr |
QuarkOCP_MX_MoEMethod.input_dtype |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.quark.schemes.quark_ocp_mx (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QuarkOCP_MX.init |
4 | 3 | 0 |
meth |
QuarkOCP_MX.get_packed_dim |
3 | 2 | 0 |
meth |
QuarkOCP_MX.create_weights |
7 | 5 | 0 |
attr |
QuarkOCP_MX.out_dtype |
1 | 0 | 0 |
attr |
QuarkOCP_MX.qscheme |
1 | 0 | 0 |
attr |
QuarkOCP_MX.weight_quant_spec |
1 | 0 | 0 |
attr |
QuarkOCP_MX.input_quant_spec |
1 | 0 | 0 |
attr |
QuarkOCP_MX.dynamic_mxfp4_quant |
1 | 0 | 0 |
attr |
QuarkOCP_MX.weight_dtype |
1 | 0 | 0 |
attr |
QuarkOCP_MX.input_dtype |
1 | 0 | 0 |
attr |
QuarkOCP_MX.ocp_mx_scheme |
1 | 0 | 0 |
attr |
QuarkOCP_MX.static_input_scales |
1 | 0 | 0 |
attr |
QuarkOCP_MX.emulate |
1 | 0 | 0 |
attr |
QuarkOCP_MX.rocm_use_aiter_fp4_asm_gemm |
1 | 0 | 0 |
attr |
QuarkOCP_MX.dequant_func |
1 | 0 | 0 |
attr |
QuarkOCP_MX.quant_dequant_func |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.quark.schemes.quark_scheme (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QuarkScheme.create_weights |
3 | 0 | 0 |
meth |
QuarkScheme.apply_weights |
4 | 3 | 0 |
meth |
QuarkScheme.process_weights_after_loading |
2 | 1 | 0 |
vllm.model_executor.layers.quantization.quark.schemes.quark_w8a8_fp8 (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QuarkW8A8Fp8.init |
3 | 2 | 0 |
meth |
QuarkW8A8Fp8.process_weights_after_loading |
2 | 1 | 0 |
meth |
QuarkW8A8Fp8.create_weights |
7 | 5 | 0 |
attr |
QuarkW8A8Fp8.weight_qscheme |
1 | 0 | 0 |
attr |
QuarkW8A8Fp8.activation_quant_key |
1 | 0 | 0 |
attr |
QuarkW8A8Fp8.weight_quant_key |
1 | 0 | 0 |
attr |
QuarkW8A8Fp8.out_dtype |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.quark.schemes.quark_w8a8_int8 (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QuarkW8A8Int8.init |
4 | 3 | 0 |
meth |
QuarkW8A8Int8.create_weights |
7 | 5 | 0 |
attr |
QuarkW8A8Int8.qscheme |
1 | 0 | 0 |
attr |
QuarkW8A8Int8.is_static_input_scheme |
1 | 0 | 0 |
attr |
QuarkW8A8Int8.input_symmetric |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.quark.utils (1 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
deep_compare |
3 | 3 | 2 |
func |
quark_quantize_weight_to_mxfp4 |
2 | 1 | 0 |
vllm.model_executor.layers.quantization.qutlass_utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
triton_scale_swizzle |
9 | 8 | 0 |
vllm.model_executor.layers.quantization.schema (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
QuantParamSchema.model_config |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.torchao (9 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TorchAOLinearMethod.init |
2 | 1 | 0 |
meth |
TorchAOLinearMethod.create_weights |
8 | 6 | 0 |
attr |
TorchAOLinearMethod.quant_config |
1 | 0 | 0 |
meth |
TorchAOConfig.init |
4 | 3 | 0 |
attr |
TorchAOConfig.torchao_config |
1 | 0 | 0 |
attr |
TorchAOConfig.skip_modules |
1 | 0 | 0 |
attr |
TorchAOConfig.is_checkpoint_torchao_serialized |
1 | 0 | 0 |
func |
torchao_quantize_param_data |
3 | 3 | 1 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.utils.allspark_utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
check_allspark_supported_dtype_shape |
6 | 5 | 0 |
vllm.model_executor.layers.quantization.utils.flashinfer_utils (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
rotate_weights_for_fi_trtllm_fp8_per_tensor_moe |
4 | 3 | 0 |
vllm.model_executor.layers.quantization.utils.fp8_utils (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
prepare_fp8_moe_layer_for_deepgemm |
6 | 5 | 0 |
func |
maybe_post_process_fp8_weight_block |
2 | 1 | 0 |
meth |
W8A8BlockFp8LinearOp.init |
5 | 4 | 0 |
attr |
W8A8BlockFp8LinearOp.weight_group_shape |
1 | 0 | 0 |
attr |
W8A8BlockFp8LinearOp.act_quant_group_shape |
1 | 0 | 0 |
attr |
W8A8BlockFp8LinearOp.is_deep_gemm_supported |
1 | 0 | 0 |
attr |
W8A8BlockFp8LinearOp.is_hopper |
1 | 0 | 0 |
attr |
W8A8BlockFp8LinearOp.use_deep_gemm_e8m0 |
1 | 0 | 0 |
attr |
W8A8BlockFp8LinearOp.is_flashinfer_supported |
1 | 0 | 0 |
attr |
W8A8BlockFp8LinearOp.deepgemm_input_quant_op |
1 | 0 | 0 |
func |
silu_mul_per_token_group_quant_fp8_colmajor |
5 | 4 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.utils.gptq_utils (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
override_config |
3 | 2 | 0 |
func |
get_linear_quant_method |
5 | 4 | 0 |
vllm.model_executor.layers.quantization.utils.int8_utils (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
round_int8 |
2 | 0 | 0 |
func |
per_token_quant_int8 |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.utils.layer_utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
update_tensor_inplace |
3 | 2 | 0 |
vllm.model_executor.layers.quantization.utils.marlin_utils (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
marlin_moe_intermediate_size |
3 | 2 | 0 |
func |
marlin_moe_permute_scales |
6 | 5 | 0 |
func |
get_scale_perms |
1 | 0 | 0 |
func |
maybe_warn_marlin_atomic_add |
3 | 0 | 0 |
func |
get_marlin_input_dtype |
2 | 1 | 0 |
func |
marlin_quant_input |
3 | 2 | 0 |
func |
query_marlin_supported_quant_types |
4 | 3 | 0 |
func |
marlin_act_int8_process_scales |
2 | 1 | 0 |
func |
maybe_warn_marlin_atomic_add_env |
1 | 0 | 0 |
func |
moe_awq_to_marlin_zero_points |
6 | 5 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
is_fp4_marlin_supported |
1 | 0 | 0 |
func |
rand_marlin_weight_mxfp4_like |
4 | 0 | 0 |
func |
mxfp4_marlin_process_scales |
3 | 0 | 0 |
func |
nvfp4_marlin_process_scales |
2 | 0 | 0 |
func |
nvfp4_marlin_process_global_scale |
2 | 0 | 0 |
func |
rand_marlin_weight_nvfp4_like |
4 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.utils.marlin_utils_fp8 (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
fp8_fused_exponent_bias_into_scales |
2 | 0 | 0 |
func |
is_fp8_marlin_supported |
1 | 0 | 0 |
func |
marlin_quant_fp8_torch |
4 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.utils.marlin_utils_test (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
awq_marlin_quantize |
5 | 4 | 0 |
meth |
MarlinWorkspace.init |
4 | 0 | 0 |
attr |
MarlinWorkspace.scratch |
1 | 0 | 0 |
func |
marlin_permute_weights |
7 | 0 | 0 |
func |
get_weight_perm |
3 | 2 | 0 |
func |
marlin_quantize |
7 | 6 | 0 |
func |
marlin_weights |
7 | 0 | 0 |
vllm.model_executor.layers.quantization.utils.mxfp4_utils (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
get_padding_alignment |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.utils.mxfp8_utils (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
Mxfp8LinearOp.init |
2 | 1 | 0 |
attr |
Mxfp8LinearOp.backend |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.utils.nvfp4_emulation_utils (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
dequantize_to_dtype |
7 | 0 | 0 |
func |
break_fp4_bytes |
3 | 0 | 0 |
func |
ref_nvfp4_quant |
4 | 0 | 0 |
vllm.model_executor.layers.quantization.utils.nvfp4_utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.utils.ocp_mx_utils (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OCP_MX_Scheme.from_quant_dtype |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.utils.quant_utils (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
kMxfp4Dynamic |
1 | 0 | 0 |
attr |
kNvfp4Static |
1 | 0 | 0 |
func |
unpack_quantized_values_into_int32 |
4 | 3 | 0 |
func |
unpack_cols |
5 | 4 | 0 |
attr |
kFp8StaticTensorSym |
1 | 0 | 0 |
attr |
kMxfp4StaticGroupScale |
1 | 0 | 0 |
attr |
kMxfp4Static |
1 | 0 | 0 |
attr |
kFp8StaticChannelSym |
1 | 0 | 0 |
func |
group_broadcast |
3 | 0 | 0 |
attr |
kStaticChannelScale |
1 | 0 | 0 |
attr |
kDynamic128Scale |
1 | 0 | 0 |
func |
pack_cols |
5 | 4 | 0 |
func |
awq_pack |
5 | 4 | 0 |
attr |
kStaticTokenScale |
1 | 0 | 0 |
attr |
kMxfp8DynamicGroupScale |
1 | 0 | 0 |
func |
pack_quantized_values_into_int32 |
4 | 3 | 0 |
attr |
kMxfp8Dynamic |
1 | 0 | 0 |
func |
sort_weights |
3 | 2 | 0 |
meth |
ScaleDesc.str |
1 | 0 | 0 |
attr |
kFp8DynamicTensorSym |
1 | 0 | 0 |
meth |
QuantKey.str |
1 | 0 | 0 |
func |
pack_rows |
5 | 4 | 0 |
func |
get_attribute_fallback |
3 | 1 | 0 |
func |
get_pack_factor |
2 | 0 | 0 |
attr |
kFp8StaticTokenSym |
1 | 0 | 0 |
attr |
kMxfp4DynamicGroupScale |
1 | 0 | 0 |
attr |
kDynamicTokenScale |
1 | 0 | 0 |
attr |
kNvfp4StaticGroupScale |
1 | 0 | 0 |
attr |
kDynamic64Scale |
1 | 0 | 0 |
attr |
kDynamicTensorScale |
1 | 0 | 0 |
attr |
kFp8Dynamic64Sym |
1 | 0 | 0 |
func |
gptq_pack |
5 | 4 | 0 |
func |
gptq_quantize_weights |
6 | 5 | 0 |
attr |
kNvfp4Dynamic |
1 | 0 | 0 |
attr |
kFp8Dynamic128Sym |
1 | 0 | 0 |
func |
quantize_weights |
6 | 5 | 0 |
attr |
kStaticTensorScale |
1 | 0 | 0 |
attr |
FP8_DTYPE |
1 | 0 | 0 |
attr |
kStatic128BlockScale |
1 | 0 | 0 |
func |
permute_rows |
5 | 4 | 0 |
func |
get_and_maybe_dequant_weights |
3 | 2 | 0 |
attr |
kFp8Static128BlockSym |
1 | 0 | 0 |
attr |
kNvfp4DynamicGroupScale |
1 | 0 | 0 |
attr |
kFp8DynamicTokenSym |
1 | 0 | 0 |
vllm.model_executor.layers.quantization.utils.w8a8_utils (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
CUTLASS_FP8_SUPPORTED |
1 | 0 | 0 |
attr |
CUTLASS_BLOCK_FP8_SUPPORTED |
1 | 0 | 0 |
vllm.model_executor.layers.resampler (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseResampler._repeat |
3 | 1 | 0 |
attr |
BaseResampler.num_queries |
1 | 0 | 0 |
attr |
BaseResampler.embed_dim |
1 | 0 | 0 |
attr |
BaseResampler.num_heads |
1 | 0 | 0 |
attr |
BaseResampler.query |
1 | 0 | 0 |
attr |
BaseResampler.attn |
1 | 0 | 0 |
attr |
BaseResampler.ln_q |
1 | 0 | 0 |
attr |
BaseResampler.ln_kv |
1 | 0 | 0 |
attr |
BaseResampler.do_post_projection |
1 | 0 | 0 |
attr |
BaseResampler.kv_proj |
1 | 0 | 0 |
attr |
BaseResampler.ln_post |
1 | 0 | 0 |
attr |
BaseResampler.proj |
1 | 0 | 0 |
attr |
Resampler2.adaptive |
1 | 0 | 0 |
attr |
Resampler2.pos_embed |
1 | 0 | 0 |
attr |
DEFAULT_LN |
1 | 0 | 0 |
vllm.model_executor.layers.rotary_embedding.base (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
RotaryEmbeddingBase.head_size |
1 | 0 | 0 |
attr |
RotaryEmbeddingBase.rotary_dim |
1 | 0 | 0 |
attr |
RotaryEmbeddingBase.max_position_embeddings |
1 | 0 | 0 |
attr |
RotaryEmbeddingBase.base |
1 | 0 | 0 |
attr |
RotaryEmbeddingBase.is_neox_style |
1 | 0 | 0 |
attr |
RotaryEmbeddingBase.dtype |
1 | 0 | 0 |
attr |
RotaryEmbeddingBase.use_aiter |
1 | 0 | 0 |
attr |
RotaryEmbeddingBase.apply_rotary_emb |
1 | 0 | 0 |
attr |
RotaryEmbeddingBase.use_flashinfer |
1 | 0 | 0 |
attr |
RotaryEmbeddingBase.rocm_aiter_triton_rotary_embedding |
1 | 0 | 0 |
vllm.model_executor.layers.rotary_embedding.common (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
ApplyRotaryEmb.is_neox_style |
1 | 0 | 0 |
attr |
ApplyRotaryEmb.enable_fp32_compute |
1 | 0 | 0 |
attr |
ApplyRotaryEmb.apply_rotary_emb_flash_attn |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.rotary_embedding.deepseek_scaling_rope (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
DeepseekScalingRotaryEmbedding.scaling_factor |
1 | 0 | 0 |
attr |
DeepseekScalingRotaryEmbedding.extrapolation_factor |
1 | 0 | 0 |
attr |
DeepseekScalingRotaryEmbedding.attn_factor |
1 | 0 | 0 |
attr |
DeepseekScalingRotaryEmbedding.beta_fast |
1 | 0 | 0 |
attr |
DeepseekScalingRotaryEmbedding.beta_slow |
1 | 0 | 0 |
attr |
DeepseekScalingRotaryEmbedding.mscale |
1 | 0 | 0 |
attr |
DeepseekScalingRotaryEmbedding.use_flashinfer |
1 | 0 | 0 |
vllm.model_executor.layers.rotary_embedding.dual_chunk_rope (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DualChunkRotaryEmbedding._apply_rotary_embedding |
4 | 0 | 0 |
attr |
DualChunkRotaryEmbedding.head_size |
1 | 0 | 0 |
attr |
DualChunkRotaryEmbedding.rotary_dim |
1 | 0 | 0 |
attr |
DualChunkRotaryEmbedding.max_position_embeddings |
1 | 0 | 0 |
attr |
DualChunkRotaryEmbedding.base |
1 | 0 | 0 |
attr |
DualChunkRotaryEmbedding.is_neox_style |
1 | 0 | 0 |
attr |
DualChunkRotaryEmbedding.chunk_size |
1 | 0 | 0 |
attr |
DualChunkRotaryEmbedding.local_size |
1 | 0 | 0 |
attr |
DualChunkRotaryEmbedding.dtype |
1 | 0 | 0 |
attr |
DualChunkRotaryEmbedding.device |
1 | 0 | 0 |
vllm.model_executor.layers.rotary_embedding.dynamic_ntk_alpha_rope (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
DynamicNTKAlphaRotaryEmbedding.scaling_alpha |
1 | 0 | 0 |
vllm.model_executor.layers.rotary_embedding.dynamic_ntk_scaling_rope (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
DynamicNTKScalingRotaryEmbedding.scaling_factor |
1 | 0 | 0 |
vllm.model_executor.layers.rotary_embedding.fope (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FourierRotaryEmbedding.init |
12 | 11 | 0 |
meth |
FourierRotaryEmbedding.weight_loader |
3 | 2 | 0 |
attr |
FourierRotaryEmbedding.num_key_value_heads |
1 | 0 | 0 |
attr |
FourierRotaryEmbedding.num_inv_freq |
1 | 0 | 0 |
attr |
FourierRotaryEmbedding.fope_sep_head |
1 | 0 | 0 |
attr |
FourierRotaryEmbedding.fope_init_factor |
1 | 0 | 0 |
attr |
FourierRotaryEmbedding.input_dim |
1 | 0 | 0 |
attr |
FourierRotaryEmbedding.output_dim |
1 | 0 | 0 |
attr |
FourierRotaryEmbedding.cos_coef |
1 | 0 | 0 |
attr |
FourierRotaryEmbedding.sin_coef |
1 | 0 | 0 |
attr |
FourierRotaryEmbedding.update_cache |
1 | 0 | 0 |
vllm.model_executor.layers.rotary_embedding.llama3_rope (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Llama3RotaryEmbedding.scaling_factor |
1 | 0 | 0 |
attr |
Llama3RotaryEmbedding.low_freq_factor |
1 | 0 | 0 |
attr |
Llama3RotaryEmbedding.high_freq_factor |
1 | 0 | 0 |
attr |
Llama3RotaryEmbedding.orig_max_position |
1 | 0 | 0 |
vllm.model_executor.layers.rotary_embedding.llama4_vision_rope (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Llama4VisionRotaryEmbedding.init |
7 | 6 | 0 |
vllm.model_executor.layers.rotary_embedding.mrope (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MRotaryEmbedding.get_next_input_positions_tensor |
6 | 5 | 0 |
attr |
MRotaryEmbedding.scaling_factor |
1 | 0 | 0 |
attr |
MRotaryEmbedding.extrapolation_factor |
1 | 0 | 0 |
attr |
MRotaryEmbedding.attn_factor |
1 | 0 | 0 |
attr |
MRotaryEmbedding.beta_fast |
1 | 0 | 0 |
attr |
MRotaryEmbedding.beta_slow |
1 | 0 | 0 |
attr |
MRotaryEmbedding.truncate |
1 | 0 | 0 |
attr |
MRotaryEmbedding.cache_max_position_num |
1 | 0 | 0 |
attr |
MRotaryEmbedding.mrope_section |
1 | 0 | 0 |
attr |
MRotaryEmbedding.mrope_interleaved |
1 | 0 | 0 |
attr |
MRotaryEmbedding.mscale |
1 | 0 | 0 |
vllm.model_executor.layers.rotary_embedding.mrope_interleaved (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MRotaryEmbeddingInterleaved.cache_max_position_num |
1 | 0 | 0 |
attr |
MRotaryEmbeddingInterleaved.mrope_section |
1 | 0 | 0 |
attr |
MRotaryEmbeddingInterleaved.mrope_interleaved |
1 | 0 | 0 |
attr |
MRotaryEmbeddingInterleaved.mrope_dim |
1 | 0 | 0 |
attr |
MRotaryEmbeddingInterleaved.layer_cache |
1 | 0 | 0 |
vllm.model_executor.layers.rotary_embedding.ntk_scaling_rope (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
NTKScalingRotaryEmbedding.scaling_factor |
1 | 0 | 0 |
attr |
NTKScalingRotaryEmbedding.mixed_b |
1 | 0 | 0 |
vllm.model_executor.layers.rotary_embedding.phi3_long_rope_scaled_rope (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
Phi3LongRoPEScaledRotaryEmbedding.init |
12 | 11 | 0 |
attr |
Phi3LongRoPEScaledRotaryEmbedding.rotary_dim |
1 | 0 | 0 |
attr |
Phi3LongRoPEScaledRotaryEmbedding.head_size |
1 | 0 | 0 |
attr |
Phi3LongRoPEScaledRotaryEmbedding.max_position_embeddings |
1 | 0 | 0 |
attr |
Phi3LongRoPEScaledRotaryEmbedding.original_max_position_embeddings |
1 | 0 | 0 |
attr |
Phi3LongRoPEScaledRotaryEmbedding.base |
1 | 0 | 0 |
attr |
Phi3LongRoPEScaledRotaryEmbedding.short_factor |
1 | 0 | 0 |
attr |
Phi3LongRoPEScaledRotaryEmbedding.long_factor |
1 | 0 | 0 |
attr |
Phi3LongRoPEScaledRotaryEmbedding.use_long_rope |
1 | 0 | 0 |
attr |
Phi3LongRoPEScaledRotaryEmbedding.short_mscale |
1 | 0 | 0 |
attr |
Phi3LongRoPEScaledRotaryEmbedding.long_mscale |
1 | 0 | 0 |
vllm.model_executor.layers.rotary_embedding.xdrope (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XDRotaryEmbedding.get_next_input_positions_tensor |
5 | 4 | 0 |
attr |
XDRotaryEmbedding.xdrope_section |
1 | 0 | 0 |
vllm.model_executor.layers.rotary_embedding.yarn_scaling_rope (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
YaRNScalingRotaryEmbedding.scaling_factor |
1 | 0 | 0 |
attr |
YaRNScalingRotaryEmbedding.extrapolation_factor |
1 | 0 | 0 |
attr |
YaRNScalingRotaryEmbedding.attn_factor |
1 | 0 | 0 |
attr |
YaRNScalingRotaryEmbedding.beta_fast |
1 | 0 | 0 |
attr |
YaRNScalingRotaryEmbedding.beta_slow |
1 | 0 | 0 |
attr |
YaRNScalingRotaryEmbedding.truncate |
1 | 0 | 0 |
attr |
YaRNScalingRotaryEmbedding.mscale |
1 | 0 | 0 |
vllm.model_executor.layers.sparse_attn_indexer (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
SparseAttnIndexer.init |
9 | 7 | 0 |
meth |
SparseAttnIndexer.forward_native |
5 | 4 | 0 |
meth |
SparseAttnIndexer.forward_cuda |
5 | 4 | 0 |
meth |
SparseAttnIndexer.forward_hip |
5 | 4 | 0 |
attr |
SparseAttnIndexer.k_cache |
1 | 0 | 0 |
attr |
SparseAttnIndexer.quant_block_size |
1 | 0 | 0 |
attr |
SparseAttnIndexer.scale_fmt |
1 | 0 | 0 |
attr |
SparseAttnIndexer.topk_tokens |
1 | 0 | 0 |
attr |
SparseAttnIndexer.head_dim |
1 | 0 | 0 |
attr |
SparseAttnIndexer.max_model_len |
1 | 0 | 0 |
attr |
SparseAttnIndexer.max_total_seq_len |
1 | 0 | 0 |
attr |
SparseAttnIndexer.topk_indices_buffer |
1 | 0 | 0 |
vllm.model_executor.layers.utils (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
default_unquantized_gemm |
5 | 4 | 0 |
func |
use_aiter_triton_gemm |
5 | 0 | 0 |
func |
cpu_unquantized_gemm |
5 | 4 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.layers.vocab_parallel_embedding (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UnquantizedEmbeddingMethod.create_weights |
8 | 6 | 0 |
meth |
VocabParallelEmbedding.init |
8 | 7 | 0 |
meth |
VocabParallelEmbedding.weight_loader |
3 | 2 | 0 |
meth |
VocabParallelEmbedding.forward_native |
2 | 0 | 0 |
meth |
VocabParallelEmbedding.forward_cuda |
2 | 0 | 0 |
attr |
VocabParallelEmbedding.tp_size |
1 | 0 | 0 |
attr |
VocabParallelEmbedding.num_embeddings |
1 | 0 | 0 |
attr |
VocabParallelEmbedding.padding_size |
1 | 0 | 0 |
attr |
VocabParallelEmbedding.org_vocab_size |
1 | 0 | 0 |
attr |
VocabParallelEmbedding.org_vocab_size_padded |
1 | 0 | 0 |
attr |
VocabParallelEmbedding.num_embeddings_padded |
1 | 0 | 0 |
attr |
VocabParallelEmbedding.shard_indices |
1 | 0 | 0 |
attr |
VocabParallelEmbedding.embedding_dim |
1 | 0 | 0 |
attr |
VocabParallelEmbedding.num_added_embeddings |
1 | 0 | 0 |
attr |
VocabParallelEmbedding.num_embeddings_per_partition |
1 | 0 | 0 |
attr |
VocabParallelEmbedding.num_org_embeddings_per_partition |
1 | 0 | 0 |
attr |
VocabParallelEmbedding.num_added_embeddings_per_partition |
1 | 0 | 0 |
meth |
ParallelLMHead.init |
9 | 8 | 0 |
meth |
ParallelLMHead.tie_weights |
2 | 1 | 0 |
meth |
ParallelLMHead.forward |
2 | 0 | 0 |
attr |
ParallelLMHead.quant_config |
1 | 0 | 0 |
attr |
ParallelLMHead.bias |
1 | 0 | 0 |
meth |
VocabParallelEmbeddingShardIndices.post_init |
1 | 0 | 0 |
vllm.model_executor.model_loader (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
register_model_loader |
2 | 1 | 0 |
vllm.model_executor.model_loader.base_loader (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseModelLoader.init |
2 | 1 | 0 |
attr |
BaseModelLoader.load_config |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.model_loader.bitsandbytes_loader (17 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BitsAndBytesModelLoader.init |
2 | 1 | 0 |
meth |
BitsAndBytesModelLoader._hf_weight_iter |
3 | 1 | 0 |
meth |
BitsAndBytesModelLoader._is_8bit_weight_name |
2 | 1 | 0 |
meth |
BitsAndBytesModelLoader._is_4bit_weight_name |
2 | 1 | 0 |
meth |
BitsAndBytesModelLoader._quantized_8bit_generator |
4 | 1 | 0 |
meth |
BitsAndBytesModelLoader._quantized_4bit_generator |
4 | 1 | 0 |
meth |
BitsAndBytesModelLoader._unquantized_generator |
4 | 1 | 0 |
meth |
BitsAndBytesModelLoader._classify_module_sharding |
2 | 1 | 0 |
meth |
BitsAndBytesModelLoader._dequantize_dq |
2 | 1 | 1 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.model_loader.default_loader (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DefaultModelLoader.init |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.model_loader.dummy_loader (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DummyModelLoader.init |
2 | 1 | 0 |
vllm.model_executor.model_loader.gguf_loader (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GGUFModelLoader.init |
2 | 1 | 0 |
meth |
GGUFModelLoader._prepare_weights |
2 | 1 | 0 |
meth |
GGUFModelLoader._get_gguf_weights_map |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.model_loader.reload.layerwise (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
finalize_layerwise_reload |
3 | 2 | 0 |
func |
initialize_layerwise_reload |
2 | 1 | 0 |
func |
record_metadata_for_reloading |
2 | 1 | 0 |
vllm.model_executor.model_loader.reload.meta (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
restore_layer_on_meta |
3 | 2 | 0 |
vllm.model_executor.model_loader.reload.torchao_decorator (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
support_quantized_model_reload_from_hp_weights |
2 | 1 | 0 |
func |
set_torchao_reload_attrs |
3 | 2 | 0 |
vllm.model_executor.model_loader.reload.types (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayerReloadingInfo.reset |
1 | 0 | 0 |
vllm.model_executor.model_loader.runai_streamer_loader (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RunaiModelStreamerLoader.init |
2 | 1 | 0 |
vllm.model_executor.model_loader.sharded_state_loader (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ShardedStateLoader.init |
2 | 1 | 0 |
meth |
ShardedStateLoader._prepare_weights |
3 | 2 | 0 |
meth |
ShardedStateLoader.iterate_over_files |
2 | 1 | 0 |
attr |
ShardedStateLoader.pattern |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.model_loader.tensorizer (15 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
EncryptionParams |
1 | 0 | 0 |
attr |
no_init_or_tensor |
1 | 0 | 0 |
attr |
TensorDeserializer |
1 | 0 | 0 |
attr |
open_stream |
1 | 0 | 0 |
meth |
TensorizerConfig.post_init |
1 | 0 | 0 |
meth |
TensorizerConfig.open_stream |
2 | 1 | 0 |
meth |
TensorizerConfig.keys |
1 | 0 | 0 |
meth |
TensorizerConfig.len |
1 | 0 | 0 |
meth |
TensorizerConfig.iter |
1 | 0 | 0 |
meth |
TensorizerConfig.getitem |
2 | 2 | 1 |
meth |
TensorizerConfig.setitem |
3 | 3 | 1 |
meth |
TensorizerConfig.delitem |
2 | 0 | 0 |
attr |
get_mem_usage |
1 | 0 | 0 |
attr |
DecryptionParams |
1 | 0 | 0 |
attr |
convert_bytes |
1 | 0 | 0 |
attr |
TensorSerializer |
1 | 0 | 0 |
vllm.model_executor.model_loader.tensorizer_loader (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TensorizerLoader.init |
2 | 1 | 0 |
meth |
TensorizerLoader._verify_config |
3 | 2 | 0 |
attr |
TensorizerLoader.tensorizer_config |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
validate_config |
2 | 1 | 0 |
vllm.model_executor.model_loader.utils (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
ParamMapping.post_init |
1 | 0 | 0 |
func |
device_loading_context |
3 | 2 | 0 |
func |
configure_quant_config |
3 | 2 | 0 |
vllm.model_executor.model_loader.weight_utils (15 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
SafeTensorsFileLoader |
1 | 0 | 0 |
attr |
SingleGroup |
1 | 0 | 0 |
func |
enable_xet_high_performance |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
gguf |
1 | 0 | 0 |
attr |
temp_dir |
1 | 0 | 0 |
attr |
SafetensorsStreamer |
1 | 0 | 0 |
func |
enable_hf_transfer |
1 | 0 | 0 |
func |
convert_pyslice_to_tensor |
2 | 2 | 1 |
attr |
fastsafetensors |
1 | 0 | 0 |
attr |
runai_model_streamer |
1 | 0 | 0 |
func |
get_lock |
3 | 2 | 0 |
func |
enable_tqdm |
2 | 1 | 0 |
meth |
DisabledTqdm.init |
3 | 0 | 0 |
vllm.model_executor.models.AXK1 (93 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AXK1Model.init |
3 | 2 | 0 |
attr |
AXK1Model.config |
1 | 0 | 0 |
attr |
AXK1Model.device |
1 | 0 | 0 |
attr |
AXK1Model.vocab_size |
1 | 0 | 0 |
attr |
AXK1Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
AXK1Model.embed_tokens |
1 | 0 | 0 |
attr |
AXK1Model.norm |
1 | 0 | 0 |
meth |
AXK1MoE.init |
5 | 4 | 0 |
attr |
AXK1MoE.tp_size |
1 | 0 | 0 |
attr |
AXK1MoE.tp_rank |
1 | 0 | 0 |
attr |
AXK1MoE.routed_scaling_factor |
1 | 0 | 0 |
attr |
AXK1MoE.ep_group |
1 | 0 | 0 |
attr |
AXK1MoE.ep_rank |
1 | 0 | 0 |
attr |
AXK1MoE.ep_size |
1 | 0 | 0 |
attr |
AXK1MoE.is_sequence_parallel |
1 | 0 | 0 |
attr |
AXK1MoE.gate |
1 | 0 | 0 |
attr |
AXK1MoE.enable_eplb |
1 | 0 | 0 |
attr |
AXK1MoE.n_redundant_experts |
1 | 0 | 0 |
attr |
AXK1MoE.n_logical_experts |
1 | 0 | 0 |
attr |
AXK1MoE.n_physical_experts |
1 | 0 | 0 |
attr |
AXK1MoE.n_local_physical_experts |
1 | 0 | 0 |
attr |
AXK1MoE.physical_expert_start |
1 | 0 | 0 |
attr |
AXK1MoE.physical_expert_end |
1 | 0 | 0 |
attr |
AXK1MoE.is_rocm_aiter_moe_enabled |
1 | 0 | 0 |
attr |
AXK1MoE.is_fusion_moe_shared_experts_enabled |
1 | 0 | 0 |
attr |
AXK1MoE.experts |
1 | 0 | 0 |
attr |
AXK1MoE.shared_experts |
1 | 0 | 0 |
attr |
AXK1Attention.hidden_size |
1 | 0 | 0 |
attr |
AXK1Attention.qk_nope_head_dim |
1 | 0 | 0 |
attr |
AXK1Attention.qk_rope_head_dim |
1 | 0 | 0 |
attr |
AXK1Attention.qk_head_dim |
1 | 0 | 0 |
attr |
AXK1Attention.v_head_dim |
1 | 0 | 0 |
attr |
AXK1Attention.q_lora_rank |
1 | 0 | 0 |
attr |
AXK1Attention.kv_lora_rank |
1 | 0 | 0 |
attr |
AXK1Attention.num_heads |
1 | 0 | 0 |
attr |
AXK1Attention.num_local_heads |
1 | 0 | 0 |
attr |
AXK1Attention.scaling |
1 | 0 | 0 |
attr |
AXK1Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
AXK1Attention.kv_a_proj_with_mqa |
1 | 0 | 0 |
attr |
AXK1Attention.kv_a_layernorm |
1 | 0 | 0 |
attr |
AXK1Attention.kv_b_proj |
1 | 0 | 0 |
attr |
AXK1Attention.o_proj |
1 | 0 | 0 |
attr |
AXK1Attention.rotary_emb |
1 | 0 | 0 |
attr |
AXK1Attention.attn |
1 | 0 | 0 |
attr |
AXK1Attention.q_a_proj |
1 | 0 | 0 |
attr |
AXK1Attention.q_a_layernorm |
1 | 0 | 0 |
attr |
AXK1Attention.q_b_proj |
1 | 0 | 0 |
attr |
AXK1Attention.q_proj |
1 | 0 | 0 |
meth |
AXK1ForCausalLM.init |
3 | 2 | 0 |
meth |
AXK1ForCausalLM.set_moe_parameters |
1 | 0 | 0 |
attr |
AXK1ForCausalLM.config |
1 | 0 | 0 |
attr |
AXK1ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
AXK1ForCausalLM.use_mha |
1 | 0 | 0 |
attr |
AXK1ForCausalLM.fuse_qkv_a_proj |
1 | 0 | 0 |
attr |
AXK1ForCausalLM.model |
1 | 0 | 0 |
attr |
AXK1ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
AXK1ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
AXK1ForCausalLM.num_moe_layers |
1 | 0 | 0 |
attr |
AXK1ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
AXK1MixtureOfExperts.extract_moe_parameters |
2 | 1 | 0 |
attr |
AXK1DecoderLayer.config |
1 | 0 | 0 |
attr |
AXK1DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
AXK1DecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
AXK1DecoderLayer.use_mha |
1 | 0 | 0 |
attr |
AXK1DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
AXK1DecoderLayer.is_layer_sparse |
1 | 0 | 0 |
attr |
AXK1DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
AXK1DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
AXK1DecoderLayer.post_mlp_layernorm |
1 | 0 | 0 |
attr |
AXK1DecoderLayer.routed_scaling_factor |
1 | 0 | 0 |
attr |
AXK1DecoderLayer.mlp |
1 | 0 | 0 |
attr |
AXK1MLAAttention.hidden_size |
1 | 0 | 0 |
attr |
AXK1MLAAttention.qk_nope_head_dim |
1 | 0 | 0 |
attr |
AXK1MLAAttention.qk_rope_head_dim |
1 | 0 | 0 |
attr |
AXK1MLAAttention.qk_head_dim |
1 | 0 | 0 |
attr |
AXK1MLAAttention.v_head_dim |
1 | 0 | 0 |
attr |
AXK1MLAAttention.q_lora_rank |
1 | 0 | 0 |
attr |
AXK1MLAAttention.kv_lora_rank |
1 | 0 | 0 |
attr |
AXK1MLAAttention.num_heads |
1 | 0 | 0 |
attr |
AXK1MLAAttention.num_local_heads |
1 | 0 | 0 |
attr |
AXK1MLAAttention.scaling |
1 | 0 | 0 |
attr |
AXK1MLAAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
AXK1MLAAttention.kv_a_layernorm |
1 | 0 | 0 |
attr |
AXK1MLAAttention.kv_b_proj |
1 | 0 | 0 |
attr |
AXK1MLAAttention.o_proj |
1 | 0 | 0 |
attr |
AXK1MLAAttention.rotary_emb |
1 | 0 | 0 |
attr |
AXK1MLAAttention.mla_attn |
1 | 0 | 0 |
attr |
AXK1MLAAttention.fused_qkv_a_proj |
1 | 0 | 0 |
attr |
AXK1MLAAttention.q_a_layernorm |
1 | 0 | 0 |
attr |
AXK1MLAAttention.q_b_proj |
1 | 0 | 0 |
attr |
AXK1MLAAttention.kv_a_proj_with_mqa |
1 | 0 | 0 |
attr |
AXK1MLAAttention.q_proj |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.adapters (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
load_weights_using_from_2_way_softmax |
3 | 1 | 0 |
func |
seq_cls_model_loader |
3 | 1 | 0 |
func |
load_weights_no_post_processing |
3 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.afmoe (77 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AfmoeAttention.init |
13 | 12 | 0 |
attr |
AfmoeAttention.layer_idx |
1 | 0 | 0 |
attr |
AfmoeAttention.hidden_size |
1 | 0 | 0 |
attr |
AfmoeAttention.total_num_heads |
1 | 0 | 0 |
attr |
AfmoeAttention.num_heads |
1 | 0 | 0 |
attr |
AfmoeAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
AfmoeAttention.num_kv_heads |
1 | 0 | 0 |
attr |
AfmoeAttention.head_dim |
1 | 0 | 0 |
attr |
AfmoeAttention.q_size |
1 | 0 | 0 |
attr |
AfmoeAttention.kv_size |
1 | 0 | 0 |
attr |
AfmoeAttention.scaling |
1 | 0 | 0 |
attr |
AfmoeAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
AfmoeAttention.is_local_attention |
1 | 0 | 0 |
attr |
AfmoeAttention.sliding_window |
1 | 0 | 0 |
attr |
AfmoeAttention.qkv_proj |
1 | 0 | 0 |
attr |
AfmoeAttention.o_proj |
1 | 0 | 0 |
attr |
AfmoeAttention.gate_proj |
1 | 0 | 0 |
attr |
AfmoeAttention.q_norm |
1 | 0 | 0 |
attr |
AfmoeAttention.k_norm |
1 | 0 | 0 |
attr |
AfmoeAttention.attn |
1 | 0 | 0 |
attr |
AfmoeAttention.rotary_emb |
1 | 0 | 0 |
meth |
AfmoeModel.init |
3 | 2 | 0 |
attr |
AfmoeModel.config |
1 | 0 | 0 |
attr |
AfmoeModel.vocab_size |
1 | 0 | 0 |
attr |
AfmoeModel.mup_enabled |
1 | 0 | 0 |
attr |
AfmoeModel.aux_hidden_state_layers |
1 | 0 | 0 |
attr |
AfmoeModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
AfmoeModel.embed_tokens |
1 | 0 | 0 |
attr |
AfmoeModel.norm |
1 | 0 | 0 |
meth |
AfmoeForCausalLM.init |
3 | 2 | 0 |
attr |
AfmoeForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.config |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.quant_config |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.model |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.expert_weights |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.num_moe_layers |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.num_expert_groups |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.num_logical_experts |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.num_physical_experts |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.num_local_physical_experts |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.num_routed_experts |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.num_shared_experts |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.num_redundant_experts |
1 | 0 | 0 |
meth |
AfmoeDecoderLayer.init |
6 | 5 | 0 |
attr |
AfmoeDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
AfmoeDecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
AfmoeDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
AfmoeDecoderLayer.moe_enabled |
1 | 0 | 0 |
attr |
AfmoeDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
AfmoeDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
AfmoeDecoderLayer.pre_mlp_layernorm |
1 | 0 | 0 |
attr |
AfmoeDecoderLayer.post_mlp_layernorm |
1 | 0 | 0 |
attr |
AfmoeDecoderLayer.mlp |
1 | 0 | 0 |
meth |
AfmoeMoE.init |
5 | 3 | 0 |
attr |
AfmoeMoE.tp_size |
1 | 0 | 0 |
attr |
AfmoeMoE.route_scale |
1 | 0 | 0 |
attr |
AfmoeMoE.score_func |
1 | 0 | 0 |
attr |
AfmoeMoE.route_norm |
1 | 0 | 0 |
attr |
AfmoeMoE.ep_group |
1 | 0 | 0 |
attr |
AfmoeMoE.ep_rank |
1 | 0 | 0 |
attr |
AfmoeMoE.ep_size |
1 | 0 | 0 |
attr |
AfmoeMoE.gate |
1 | 0 | 0 |
attr |
AfmoeMoE.expert_bias |
1 | 0 | 0 |
attr |
AfmoeMoE.enable_eplb |
1 | 0 | 0 |
attr |
AfmoeMoE.n_redundant_experts |
1 | 0 | 0 |
attr |
AfmoeMoE.n_logical_experts |
1 | 0 | 0 |
attr |
AfmoeMoE.n_physical_experts |
1 | 0 | 0 |
attr |
AfmoeMoE.n_local_physical_experts |
1 | 0 | 0 |
attr |
AfmoeMoE.physical_expert_start |
1 | 0 | 0 |
attr |
AfmoeMoE.physical_expert_end |
1 | 0 | 0 |
attr |
AfmoeMoE.shared_experts |
1 | 0 | 0 |
attr |
AfmoeMoE.experts |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.aimv2 (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AIMv2Block.init |
4 | 3 | 0 |
attr |
AIMv2Block.attn |
1 | 0 | 0 |
attr |
AIMv2Block.norm_1 |
1 | 0 | 0 |
attr |
AIMv2Block.mlp |
1 | 0 | 0 |
attr |
AIMv2Block.norm_2 |
1 | 0 | 0 |
meth |
AIMv2PatchEmbed.init |
2 | 1 | 0 |
attr |
AIMv2PatchEmbed.proj |
1 | 0 | 0 |
attr |
AIMv2PatchEmbed.norm |
1 | 0 | 0 |
meth |
AIMv2Transformer.init |
5 | 4 | 0 |
attr |
AIMv2Transformer.blocks |
1 | 0 | 0 |
attr |
AIMv2Transformer.post_trunk_norm |
1 | 0 | 0 |
meth |
AIMv2Model.init |
5 | 4 | 0 |
attr |
AIMv2Model.preprocessor |
1 | 0 | 0 |
attr |
AIMv2Model.trunk |
1 | 0 | 0 |
meth |
AIMv2ViTPreprocessor.init |
2 | 1 | 0 |
attr |
AIMv2ViTPreprocessor.patchifier |
1 | 0 | 0 |
attr |
AIMv2ViTPreprocessor.pos_embed |
1 | 0 | 0 |
meth |
AIMv2SwiGLUFFN.init |
4 | 3 | 0 |
attr |
AIMv2SwiGLUFFN.fc13 |
1 | 0 | 0 |
attr |
AIMv2SwiGLUFFN.fc2 |
1 | 0 | 0 |
attr |
AIMv2SwiGLUFFN.act_fn |
1 | 0 | 0 |
meth |
AIMv2Attention.init |
4 | 3 | 0 |
attr |
AIMv2Attention.config |
1 | 0 | 0 |
attr |
AIMv2Attention.embed_dim |
1 | 0 | 0 |
attr |
AIMv2Attention.num_heads |
1 | 0 | 0 |
attr |
AIMv2Attention.head_dim |
1 | 0 | 0 |
attr |
AIMv2Attention.scale |
1 | 0 | 0 |
attr |
AIMv2Attention.qkv |
1 | 0 | 0 |
attr |
AIMv2Attention.proj |
1 | 0 | 0 |
attr |
AIMv2Attention.tp_size |
1 | 0 | 0 |
attr |
AIMv2Attention.num_heads_per_partition |
1 | 0 | 0 |
attr |
AIMv2Attention.attn |
1 | 0 | 0 |
vllm.model_executor.models.apertus (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ApertusModel.init |
4 | 3 | 0 |
attr |
ApertusModel.config |
1 | 0 | 0 |
attr |
ApertusModel.quant_config |
1 | 0 | 0 |
attr |
ApertusModel.vocab_size |
1 | 0 | 0 |
attr |
ApertusModel.aux_hidden_state_layers |
1 | 0 | 0 |
attr |
ApertusModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
ApertusModel.embed_tokens |
1 | 0 | 0 |
attr |
ApertusModel.norm |
1 | 0 | 0 |
meth |
ApertusForCausalLM.init |
4 | 3 | 0 |
meth |
ApertusForCausalLM._init_model |
4 | 3 | 0 |
attr |
ApertusForCausalLM.config |
1 | 0 | 0 |
attr |
ApertusForCausalLM.model |
1 | 0 | 0 |
attr |
ApertusForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
ApertusForCausalLM.lm_head |
1 | 0 | 0 |
attr |
ApertusForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
ApertusAttention.hidden_size |
1 | 0 | 0 |
attr |
ApertusAttention.total_num_heads |
1 | 0 | 0 |
attr |
ApertusAttention.num_heads |
1 | 0 | 0 |
attr |
ApertusAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
ApertusAttention.num_kv_heads |
1 | 0 | 0 |
attr |
ApertusAttention.head_dim |
1 | 0 | 0 |
attr |
ApertusAttention.q_size |
1 | 0 | 0 |
attr |
ApertusAttention.kv_size |
1 | 0 | 0 |
attr |
ApertusAttention.scaling |
1 | 0 | 0 |
attr |
ApertusAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
ApertusAttention.qkv_proj |
1 | 0 | 0 |
attr |
ApertusAttention.o_proj |
1 | 0 | 0 |
attr |
ApertusAttention.attn |
1 | 0 | 0 |
attr |
ApertusAttention.q_norm |
1 | 0 | 0 |
attr |
ApertusAttention.k_norm |
1 | 0 | 0 |
meth |
ApertusMLP.forward |
2 | 0 | 0 |
attr |
ApertusMLP.up_proj |
1 | 0 | 0 |
attr |
ApertusMLP.down_proj |
1 | 0 | 0 |
attr |
ApertusMLP.act_fn |
1 | 0 | 0 |
attr |
ApertusDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
ApertusDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
ApertusDecoderLayer.mlp |
1 | 0 | 0 |
attr |
ApertusDecoderLayer.attention_layernorm |
1 | 0 | 0 |
attr |
ApertusDecoderLayer.feedforward_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.arcee (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ArceeModel.init |
4 | 3 | 0 |
attr |
ArceeModel.quant_config |
1 | 0 | 0 |
attr |
ArceeModel.config |
1 | 0 | 0 |
attr |
ArceeModel.vocab_size |
1 | 0 | 0 |
attr |
ArceeModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
ArceeModel.embed_tokens |
1 | 0 | 0 |
attr |
ArceeModel.norm |
1 | 0 | 0 |
attr |
ArceeDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
ArceeDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
ArceeDecoderLayer.mlp |
1 | 0 | 0 |
attr |
ArceeDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
ArceeDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
ArceeMLP.up_proj |
1 | 0 | 0 |
attr |
ArceeMLP.down_proj |
1 | 0 | 0 |
attr |
ArceeMLP.act_fn |
1 | 0 | 0 |
meth |
ArceeForCausalLM.init |
3 | 2 | 0 |
attr |
ArceeForCausalLM.config |
1 | 0 | 0 |
attr |
ArceeForCausalLM.model |
1 | 0 | 0 |
attr |
ArceeForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
ArceeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
ArceeForCausalLM.logits_processor |
1 | 0 | 0 |
vllm.model_executor.models.arctic (64 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
ArcticMoE.init |
7 | 6 | 0 |
meth |
ArcticMoE.weight_loader |
5 | 4 | 0 |
meth |
ArcticMoE.forward |
2 | 1 | 0 |
attr |
ArcticMoE.tp_size |
1 | 0 | 0 |
attr |
ArcticMoE.hidden_size |
1 | 0 | 0 |
attr |
ArcticMoE.num_experts |
1 | 0 | 0 |
attr |
ArcticMoE.layer_id |
1 | 0 | 0 |
attr |
ArcticMoE.top_k |
1 | 0 | 0 |
attr |
ArcticMoE.intermediate_size |
1 | 0 | 0 |
attr |
ArcticMoE.is_moe_layer |
1 | 0 | 0 |
attr |
ArcticMoE.reduce_results |
1 | 0 | 0 |
attr |
ArcticMoE.params_dtype |
1 | 0 | 0 |
attr |
ArcticMoE.mlp |
1 | 0 | 0 |
attr |
ArcticMoE.gate |
1 | 0 | 0 |
attr |
ArcticMoE.ws |
1 | 0 | 0 |
attr |
ArcticMoE.w2s |
1 | 0 | 0 |
meth |
ArcticAttention.init |
5 | 4 | 0 |
attr |
ArcticAttention.config |
1 | 0 | 0 |
attr |
ArcticAttention.hidden_size |
1 | 0 | 0 |
attr |
ArcticAttention.total_num_heads |
1 | 0 | 0 |
attr |
ArcticAttention.num_heads |
1 | 0 | 0 |
attr |
ArcticAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
ArcticAttention.num_kv_heads |
1 | 0 | 0 |
attr |
ArcticAttention.head_dim |
1 | 0 | 0 |
attr |
ArcticAttention.q_size |
1 | 0 | 0 |
attr |
ArcticAttention.kv_size |
1 | 0 | 0 |
attr |
ArcticAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
ArcticAttention.scaling |
1 | 0 | 0 |
attr |
ArcticAttention.qkv_proj |
1 | 0 | 0 |
attr |
ArcticAttention.o_proj |
1 | 0 | 0 |
attr |
ArcticAttention.rotary_emb |
1 | 0 | 0 |
attr |
ArcticAttention.attn |
1 | 0 | 0 |
meth |
ArcticForCausalLM.init |
3 | 2 | 0 |
attr |
ArcticForCausalLM.config |
1 | 0 | 0 |
attr |
ArcticForCausalLM.model |
1 | 0 | 0 |
attr |
ArcticForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
ArcticForCausalLM.lm_head |
1 | 0 | 0 |
attr |
ArcticForCausalLM.num_experts |
1 | 0 | 0 |
attr |
ArcticForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
attr |
ArcticForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
ArcticForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
ArcticDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
ArcticDecoderLayer.use_residual |
1 | 0 | 0 |
attr |
ArcticDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
ArcticDecoderLayer.block_sparse_moe |
1 | 0 | 0 |
attr |
ArcticDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
ArcticDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
ArcticDecoderLayer.residual_layernorm |
1 | 0 | 0 |
attr |
ArcticDecoderLayer.residual_mlp |
1 | 0 | 0 |
meth |
ArcticModel.init |
3 | 2 | 0 |
attr |
ArcticModel.vocab_size |
1 | 0 | 0 |
attr |
ArcticModel.embed_tokens |
1 | 0 | 0 |
attr |
ArcticModel.norm |
1 | 0 | 0 |
attr |
ArcticModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
ArcticMLP.init |
7 | 6 | 0 |
meth |
ArcticMLP.forward |
2 | 0 | 0 |
attr |
ArcticMLP.hidden_size |
1 | 0 | 0 |
attr |
ArcticMLP.expert_id |
1 | 0 | 0 |
attr |
ArcticMLP.ffn_dim |
1 | 0 | 0 |
attr |
ArcticMLP.w13 |
1 | 0 | 0 |
attr |
ArcticMLP.w2 |
1 | 0 | 0 |
attr |
ArcticMLP.act_fn |
1 | 0 | 0 |
vllm.model_executor.models.aria (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
AriaProjectorMLP.linear_in |
1 | 0 | 0 |
attr |
AriaProjectorMLP.linear_out |
1 | 0 | 0 |
attr |
AriaProjectorMLP.act |
1 | 0 | 0 |
attr |
AriaVisionTransformer.post_layernorm |
1 | 0 | 0 |
meth |
AriaTextModel.init |
3 | 2 | 0 |
attr |
AriaProjector.patch_to_query_dict |
1 | 0 | 0 |
attr |
AriaProjector.in_features |
1 | 0 | 0 |
attr |
AriaProjector.num_heads |
1 | 0 | 0 |
attr |
AriaProjector.kv_dim |
1 | 0 | 0 |
attr |
AriaProjector.hidden_features |
1 | 0 | 0 |
attr |
AriaProjector.output_dim |
1 | 0 | 0 |
attr |
AriaProjector.query |
1 | 0 | 0 |
attr |
AriaProjector.cross_attn |
1 | 0 | 0 |
attr |
AriaProjector.layer_norm |
1 | 0 | 0 |
attr |
AriaProjector.feed_forward |
1 | 0 | 0 |
attr |
AriaTextMoELayer.config |
1 | 0 | 0 |
attr |
AriaTextMoELayer.router_weight |
1 | 0 | 0 |
attr |
AriaTextMoELayer.shared_experts |
1 | 0 | 0 |
attr |
AriaTextMoELayer.experts |
1 | 0 | 0 |
meth |
AriaForConditionalGeneration.init |
3 | 2 | 0 |
meth |
AriaForConditionalGeneration.load_weights |
2 | 1 | 0 |
attr |
AriaForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
AriaForConditionalGeneration.config |
1 | 0 | 0 |
attr |
AriaForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
AriaForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
AriaForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
AriaForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
AriaForConditionalGeneration.logits_processor |
1 | 0 | 0 |
meth |
AriaProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
AriaProcessingInfo.get_vision_config |
1 | 0 | 0 |
meth |
AriaProcessingInfo.get_hf_processor |
2 | 1 | 0 |
attr |
AriaTextDecoderLayer.mlp |
1 | 0 | 0 |
vllm.model_executor.models.audioflamingo3 (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AudioFlamingo3MultiModalProjector.init |
2 | 1 | 0 |
meth |
AudioFlamingo3MultiModalProjector.forward |
2 | 0 | 0 |
attr |
AudioFlamingo3MultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
AudioFlamingo3MultiModalProjector.act |
1 | 0 | 0 |
attr |
AudioFlamingo3MultiModalProjector.linear_2 |
1 | 0 | 0 |
meth |
AudioFlamingo3ForConditionalGeneration.init |
3 | 2 | 0 |
attr |
AudioFlamingo3ForConditionalGeneration.config |
1 | 0 | 0 |
attr |
AudioFlamingo3ForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
AudioFlamingo3ForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
AudioFlamingo3ForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
AudioFlamingo3ForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
AudioFlamingo3ForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
AudioFlamingo3ForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
AudioFlamingo3Encoder.init |
2 | 1 | 0 |
meth |
AudioFlamingo3Encoder.forward |
3 | 2 | 0 |
meth |
AudioFlamingo3Encoder._get_feat_extract_output_lengths |
2 | 1 | 0 |
attr |
AudioFlamingo3Encoder.avg_pooler |
1 | 0 | 0 |
attr |
AudioFlamingo3Encoder.pos_emb |
1 | 0 | 0 |
meth |
AudioFlamingo3ProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
AudioFlamingo3ProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
AudioFlamingo3ProcessingInfo.get_feature_extractor |
2 | 1 | 0 |
meth |
AudioFlamingo3ProcessingInfo.get_data_parser |
1 | 0 | 0 |
vllm.model_executor.models.aya_vision (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AyaVisionMultiModalProjector.init |
2 | 1 | 0 |
attr |
AyaVisionMultiModalProjector.config |
1 | 0 | 0 |
attr |
AyaVisionMultiModalProjector.downsample_factor |
1 | 0 | 0 |
attr |
AyaVisionMultiModalProjector.alignment_intermediate_size |
1 | 0 | 0 |
attr |
AyaVisionMultiModalProjector.layernorm |
1 | 0 | 0 |
attr |
AyaVisionMultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
AyaVisionMultiModalProjector.act |
1 | 0 | 0 |
attr |
AyaVisionMultiModalProjector.linear_2 |
1 | 0 | 0 |
meth |
AyaVisionForConditionalGeneration.init |
3 | 2 | 0 |
meth |
AyaVisionForConditionalGeneration._process_image_input |
3 | 2 | 0 |
prop |
AyaVisionForConditionalGeneration.dtype |
1 | 0 | 0 |
attr |
AyaVisionForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
AyaVisionForConditionalGeneration.config |
1 | 0 | 0 |
attr |
AyaVisionForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
AyaVisionForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
AyaVisionForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
AyaVisionForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
AyaVisionForConditionalGeneration.language_model |
1 | 0 | 0 |
vllm.model_executor.models.bagel (22 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BagelForConditionalGeneration.init |
3 | 2 | 0 |
attr |
BagelForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
BagelForConditionalGeneration.config |
1 | 0 | 0 |
attr |
BagelForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
BagelForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
BagelForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
BagelForConditionalGeneration.vit_model |
1 | 0 | 0 |
attr |
BagelForConditionalGeneration.connector |
1 | 0 | 0 |
attr |
BagelForConditionalGeneration.vit_pos_embed |
1 | 0 | 0 |
meth |
BagelMultiModalProcessor._get_mm_fields_config |
3 | 3 | 1 |
meth |
BagelVisionMLP.init |
7 | 6 | 0 |
attr |
BagelVisionMLP.fc1 |
1 | 0 | 0 |
attr |
BagelVisionMLP.act |
1 | 0 | 0 |
attr |
BagelVisionMLP.fc2 |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
PositionEmbedding.init |
3 | 2 | 0 |
meth |
PositionEmbedding._get_2d_sincos_pos_embed |
3 | 2 | 0 |
meth |
PositionEmbedding._get_2d_sincos_pos_embed_from_grid |
3 | 1 | 0 |
meth |
PositionEmbedding._get_1d_sincos_pos_embed_from_grid |
3 | 1 | 0 |
attr |
PositionEmbedding.max_num_patch_per_side |
1 | 0 | 0 |
attr |
PositionEmbedding.hidden_size |
1 | 0 | 0 |
vllm.model_executor.models.baichuan (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaiChuanBaseForCausalLM.init |
4 | 3 | 0 |
meth |
BaiChuanBaseForCausalLM.lm_head_weight_loader |
3 | 2 | 0 |
attr |
BaiChuanBaseForCausalLM.config |
1 | 0 | 0 |
attr |
BaiChuanBaseForCausalLM.tp_size |
1 | 0 | 0 |
attr |
BaiChuanBaseForCausalLM.quant_config |
1 | 0 | 0 |
attr |
BaiChuanBaseForCausalLM.model |
1 | 0 | 0 |
attr |
BaiChuanBaseForCausalLM.lm_head |
1 | 0 | 0 |
attr |
BaiChuanBaseForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
BaiChuanBaseForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
BaiChuanDecoderLayer.init |
6 | 5 | 0 |
attr |
BaiChuanDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
BaiChuanDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
BaiChuanDecoderLayer.mlp |
1 | 0 | 0 |
attr |
BaiChuanDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
BaiChuanDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
BaiChuanMLP.init |
6 | 5 | 0 |
meth |
BaiChuanMLP.forward |
2 | 0 | 0 |
attr |
BaiChuanMLP.gate_up_proj |
1 | 0 | 0 |
attr |
BaiChuanMLP.down_proj |
1 | 0 | 0 |
attr |
BaiChuanMLP.act_fn |
1 | 0 | 0 |
meth |
BaiChuanAttention.init |
9 | 8 | 0 |
attr |
BaiChuanAttention.hidden_size |
1 | 0 | 0 |
attr |
BaiChuanAttention.total_num_heads |
1 | 0 | 0 |
attr |
BaiChuanAttention.num_heads |
1 | 0 | 0 |
attr |
BaiChuanAttention.head_dim |
1 | 0 | 0 |
attr |
BaiChuanAttention.position_embedding |
1 | 0 | 0 |
attr |
BaiChuanAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
BaiChuanAttention.W_pack |
1 | 0 | 0 |
attr |
BaiChuanAttention.o_proj |
1 | 0 | 0 |
attr |
BaiChuanAttention.attn |
1 | 0 | 0 |
attr |
BaiChuanAttention.rotary_emb |
1 | 0 | 0 |
attr |
BaiChuanAttention.scaling |
1 | 0 | 0 |
meth |
BaiChuanForCausalLM.init |
3 | 2 | 0 |
meth |
BaichuanForCausalLM.init |
3 | 2 | 0 |
attr |
BaiChuanModel.config |
1 | 0 | 0 |
attr |
BaiChuanModel.vocab_size |
1 | 0 | 0 |
attr |
BaiChuanModel.embed_tokens |
1 | 0 | 0 |
attr |
BaiChuanModel.norm |
1 | 0 | 0 |
attr |
BaiChuanModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.bailing_moe (65 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BailingMoeBlock.init |
5 | 4 | 0 |
attr |
BailingMoeBlock.config |
1 | 0 | 0 |
attr |
BailingMoeBlock.input_layernorm |
1 | 0 | 0 |
attr |
BailingMoeBlock.attention |
1 | 0 | 0 |
attr |
BailingMoeBlock.post_attention_layernorm |
1 | 0 | 0 |
attr |
BailingMoeBlock.mlp |
1 | 0 | 0 |
meth |
BailingMLP.forward |
2 | 0 | 0 |
attr |
BailingMLP.gate_up_proj |
1 | 0 | 0 |
attr |
BailingMLP.down_proj |
1 | 0 | 0 |
attr |
BailingMLP.act_fn |
1 | 0 | 0 |
meth |
BailingMoeModel.init |
3 | 2 | 0 |
attr |
BailingMoeModel.config |
1 | 0 | 0 |
attr |
BailingMoeModel.vocab_size |
1 | 0 | 0 |
attr |
BailingMoeModel.embed_dim |
1 | 0 | 0 |
attr |
BailingMoeModel.tie_word_embeddings |
1 | 0 | 0 |
attr |
BailingMoeModel.embedding_dropout |
1 | 0 | 0 |
attr |
BailingMoeModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
BailingMoeModel.word_embeddings |
1 | 0 | 0 |
attr |
BailingMoeModel.norm |
1 | 0 | 0 |
meth |
BailingAttention.init |
6 | 5 | 0 |
attr |
BailingAttention.hidden_size |
1 | 0 | 0 |
attr |
BailingAttention.total_num_heads |
1 | 0 | 0 |
attr |
BailingAttention.total_kv_heads |
1 | 0 | 0 |
attr |
BailingAttention.num_heads |
1 | 0 | 0 |
attr |
BailingAttention.head_dim |
1 | 0 | 0 |
attr |
BailingAttention.q_size_per_rank |
1 | 0 | 0 |
attr |
BailingAttention.num_kv_heads |
1 | 0 | 0 |
attr |
BailingAttention.kv_size_per_rank |
1 | 0 | 0 |
attr |
BailingAttention.scale |
1 | 0 | 0 |
attr |
BailingAttention.use_qk_norm |
1 | 0 | 0 |
attr |
BailingAttention.use_rmsnorm |
1 | 0 | 0 |
attr |
BailingAttention.query_key_value |
1 | 0 | 0 |
attr |
BailingAttention.dense |
1 | 0 | 0 |
attr |
BailingAttention.rotary_emb |
1 | 0 | 0 |
attr |
BailingAttention.attn |
1 | 0 | 0 |
attr |
BailingAttention.query_layernorm |
1 | 0 | 0 |
attr |
BailingAttention.key_layernorm |
1 | 0 | 0 |
attr |
BailingMoeForCausalLM.config |
1 | 0 | 0 |
attr |
BailingMoeForCausalLM.quant_config |
1 | 0 | 0 |
attr |
BailingMoeForCausalLM.max_position_embeddings |
1 | 0 | 0 |
attr |
BailingMoeForCausalLM.model |
1 | 0 | 0 |
attr |
BailingMoeForCausalLM.tie_word_embeddings |
1 | 0 | 0 |
attr |
BailingMoeForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
BailingMoeForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
BailingMoeForCausalLM.lm_head |
1 | 0 | 0 |
meth |
BailingMoE.init |
6 | 5 | 0 |
attr |
BailingMoE.tp_size |
1 | 0 | 0 |
attr |
BailingMoE.tp_rank |
1 | 0 | 0 |
attr |
BailingMoE.num_experts |
1 | 0 | 0 |
attr |
BailingMoE.top_k |
1 | 0 | 0 |
attr |
BailingMoE.norm_expert_prob |
1 | 0 | 0 |
attr |
BailingMoE.hidden_size |
1 | 0 | 0 |
attr |
BailingMoE.quant_config |
1 | 0 | 0 |
attr |
BailingMoE.num_shared_experts |
1 | 0 | 0 |
attr |
BailingMoE.score_function |
1 | 0 | 0 |
attr |
BailingMoE.n_group |
1 | 0 | 0 |
attr |
BailingMoE.topk_group |
1 | 0 | 0 |
attr |
BailingMoE.use_grouped_topk |
1 | 0 | 0 |
attr |
BailingMoE.routed_scaling_factor |
1 | 0 | 0 |
attr |
BailingMoE.gate |
1 | 0 | 0 |
attr |
BailingMoE.correction_bias |
1 | 0 | 0 |
attr |
BailingMoE.experts |
1 | 0 | 0 |
attr |
BailingMoE.router_dtype |
1 | 0 | 0 |
attr |
BailingMoE.shared_experts |
1 | 0 | 0 |
vllm.model_executor.models.bailing_moe_linear (129 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BailingMoeV25.init |
5 | 4 | 0 |
attr |
BailingMoeV25.layer_id |
1 | 0 | 0 |
attr |
BailingMoeV25.tp_size |
1 | 0 | 0 |
attr |
BailingMoeV25.tp_rank |
1 | 0 | 0 |
attr |
BailingMoeV25.num_experts |
1 | 0 | 0 |
attr |
BailingMoeV25.top_k |
1 | 0 | 0 |
attr |
BailingMoeV25.norm_expert_prob |
1 | 0 | 0 |
attr |
BailingMoeV25.hidden_size |
1 | 0 | 0 |
attr |
BailingMoeV25.quant_config |
1 | 0 | 0 |
attr |
BailingMoeV25.num_shared_experts |
1 | 0 | 0 |
attr |
BailingMoeV25.score_function |
1 | 0 | 0 |
attr |
BailingMoeV25.n_group |
1 | 0 | 0 |
attr |
BailingMoeV25.topk_group |
1 | 0 | 0 |
attr |
BailingMoeV25.use_grouped_topk |
1 | 0 | 0 |
attr |
BailingMoeV25.routed_scaling_factor |
1 | 0 | 0 |
attr |
BailingMoeV25.gate |
1 | 0 | 0 |
attr |
BailingMoeV25.experts |
1 | 0 | 0 |
attr |
BailingMoeV25.router_dtype |
1 | 0 | 0 |
attr |
BailingMoeV25.shared_experts |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.hidden_size |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.num_heads |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.layer_id |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.prefix |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.qk_nope_head_dim |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.qk_rope_head_dim |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.qk_head_dim |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.v_head_dim |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.q_lora_rank |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.kv_lora_rank |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.num_local_heads |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.scaling |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.kv_a_layernorm |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.kv_b_proj |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.o_proj |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.rotary_emb |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.mla_attn |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.fused_qkv_a_proj |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.q_a_layernorm |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.q_b_proj |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.q_proj |
1 | 0 | 0 |
attr |
BailingMoeV25MLAAttention.kv_a_proj_with_mqa |
1 | 0 | 0 |
attr |
BailingMoeV25ForCausalLM.config |
1 | 0 | 0 |
attr |
BailingMoeV25ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
BailingMoeV25ForCausalLM.model |
1 | 0 | 0 |
attr |
BailingMoeV25ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
BailingMoeV25ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
BailingMoeV25DecoderLayer.layer_id |
1 | 0 | 0 |
attr |
BailingMoeV25DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
BailingMoeV25DecoderLayer.attention_type |
1 | 0 | 0 |
attr |
BailingMoeV25DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
BailingMoeV25DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
BailingMoeV25DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
BailingMoeV25DecoderLayer.mlp |
1 | 0 | 0 |
func |
is_linear_layer |
3 | 0 | 0 |
meth |
BailingMoeV25Model.init |
3 | 2 | 0 |
attr |
BailingMoeV25Model.config |
1 | 0 | 0 |
attr |
BailingMoeV25Model.vocab_size |
1 | 0 | 0 |
attr |
BailingMoeV25Model.embed_dim |
1 | 0 | 0 |
attr |
BailingMoeV25Model.layer_group_size |
1 | 0 | 0 |
attr |
BailingMoeV25Model.num_layers |
1 | 0 | 0 |
attr |
BailingMoeV25Model.decoder_attention_types |
1 | 0 | 0 |
attr |
BailingMoeV25Model.word_embeddings |
1 | 0 | 0 |
attr |
BailingMoeV25Model.norm |
1 | 0 | 0 |
meth |
BailingMoEGate.init |
4 | 3 | 0 |
meth |
BailingMoEGate.forward |
2 | 0 | 0 |
attr |
BailingMoEGate.params_dtype |
1 | 0 | 0 |
attr |
BailingMoEGate.weight |
1 | 0 | 0 |
attr |
BailingMoEGate.expert_bias |
1 | 0 | 0 |
meth |
BailingGroupRMSNormGate.init |
7 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
BailingMoELinearAttention.init |
7 | 6 | 0 |
meth |
BailingMoELinearAttention._prefill_and_mix_infer |
7 | 0 | 0 |
meth |
BailingMoELinearAttention._decode_infer |
7 | 0 | 0 |
attr |
BailingMoELinearAttention.layer_id |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.hidden_size |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.total_num_heads |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.total_kv_heads |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.tp_size |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.tp_rank |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.model_config |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.cache_config |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.prefix |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.head_dim |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.hidden_inner_size |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.scaling |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.tp_heads |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.rope_theta |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.tp_kv_heads |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.q_size_per_rank |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.kv_size_per_rank |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.use_qk_norm |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.linear_backend |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.linear_scale |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.linear_rope |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.BLOCK |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.query_key_value |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.g_proj |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.dense |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.group_norm_size |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.rms_norm_eps |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.g_norm |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.rotary_emb |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.tp_slope |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.linear_silu |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.query_layernorm |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.key_layernorm |
1 | 0 | 0 |
attr |
BailingMoELinearAttention.slope_rate |
1 | 0 | 0 |
vllm.model_executor.models.bamba (50 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BambaMixerDecoderLayer.forward |
4 | 2 | 0 |
attr |
BambaMixerDecoderLayer.config |
1 | 0 | 0 |
attr |
BambaMixerDecoderLayer.mamba |
1 | 0 | 0 |
attr |
BambaMixerDecoderLayer.feed_forward |
1 | 0 | 0 |
attr |
BambaMixerDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
BambaMixerDecoderLayer.pre_ff_layernorm |
1 | 0 | 0 |
meth |
BambaMLP.forward |
2 | 0 | 0 |
attr |
BambaMLP.gate_up_proj |
1 | 0 | 0 |
attr |
BambaMLP.down_proj |
1 | 0 | 0 |
attr |
BambaMLP.act_fn |
1 | 0 | 0 |
meth |
BambaAttentionDecoderLayer.self_attention |
4 | 3 | 0 |
meth |
BambaAttentionDecoderLayer.forward |
5 | 3 | 0 |
attr |
BambaAttentionDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.total_num_heads |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.num_heads |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.total_num_kv_heads |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.num_kv_heads |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.head_dim |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.q_size |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.kv_size |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.scaling |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.max_position_embeddings |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.rotary_emb |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.qkv_proj |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.o_proj |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.attn |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.feed_forward |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
BambaAttentionDecoderLayer.pre_ff_layernorm |
1 | 0 | 0 |
meth |
BambaForCausalLM.init |
3 | 2 | 0 |
meth |
BambaForCausalLM.forward |
6 | 4 | 0 |
attr |
BambaForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
BambaForCausalLM.model_config |
1 | 0 | 0 |
attr |
BambaForCausalLM.quant_config |
1 | 0 | 0 |
attr |
BambaForCausalLM.config |
1 | 0 | 0 |
attr |
BambaForCausalLM.scheduler_config |
1 | 0 | 0 |
attr |
BambaForCausalLM.model |
1 | 0 | 0 |
attr |
BambaForCausalLM.lm_head |
1 | 0 | 0 |
attr |
BambaForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
BambaForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
BambaModel.init |
3 | 2 | 0 |
attr |
BambaModel.config |
1 | 0 | 0 |
attr |
BambaModel.vocab_size |
1 | 0 | 0 |
attr |
BambaModel.embed_tokens |
1 | 0 | 0 |
attr |
BambaModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
BambaModel.final_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.bee (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BeeMultiModalProjector.init |
2 | 0 | 0 |
attr |
BeeMultiModalProjector.pre_norm |
1 | 0 | 0 |
attr |
BeeMultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
BeeMultiModalProjector.act |
1 | 0 | 0 |
attr |
BeeMultiModalProjector.linear_2 |
1 | 0 | 0 |
meth |
BeeProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
BeeProcessingInfo.get_hf_processor |
2 | 1 | 0 |
attr |
BeeForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
BeeForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
vllm.model_executor.models.bert (80 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BertSelfOutput.init |
5 | 4 | 0 |
attr |
BertSelfOutput.dense |
1 | 0 | 0 |
attr |
BertSelfOutput.LayerNorm |
1 | 0 | 0 |
meth |
BertForTokenClassification.init |
3 | 2 | 0 |
meth |
BertForTokenClassification.load_weights |
2 | 1 | 0 |
attr |
BertForTokenClassification.head_dtype |
1 | 0 | 0 |
attr |
BertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
BertForTokenClassification.bert |
1 | 0 | 0 |
attr |
BertForTokenClassification.classifier |
1 | 0 | 0 |
attr |
BertForTokenClassification.pooler |
1 | 0 | 0 |
meth |
BertEmbeddingModel.init |
3 | 2 | 0 |
meth |
BertEmbeddingModel.load_weights |
2 | 1 | 0 |
attr |
BertEmbeddingModel.model |
1 | 0 | 0 |
attr |
BertEmbeddingModel.pooler |
1 | 0 | 0 |
meth |
SPLADESparsePooler.init |
6 | 5 | 0 |
attr |
SPLADESparsePooler.mlm_head |
1 | 0 | 0 |
attr |
SPLADESparsePooler.cls_token_id |
1 | 0 | 0 |
attr |
SPLADESparsePooler.sep_token_id |
1 | 0 | 0 |
attr |
SPLADESparsePooler.pooling |
1 | 0 | 0 |
attr |
SPLADESparsePooler.remove_cls_sep |
1 | 0 | 0 |
meth |
BertEmbedding.init |
2 | 1 | 0 |
attr |
BertEmbedding.size |
1 | 0 | 0 |
attr |
BertEmbedding.word_embeddings |
1 | 0 | 0 |
attr |
BertEmbedding.position_embeddings |
1 | 0 | 0 |
attr |
BertEmbedding.token_type_embeddings |
1 | 0 | 0 |
attr |
BertEmbedding.LayerNorm |
1 | 0 | 0 |
attr |
BertEmbedding.position_embedding_type |
1 | 0 | 0 |
meth |
BertSelfAttention.init |
6 | 5 | 0 |
attr |
BertSelfAttention.hidden_size |
1 | 0 | 0 |
attr |
BertSelfAttention.total_num_heads |
1 | 0 | 0 |
attr |
BertSelfAttention.num_heads |
1 | 0 | 0 |
attr |
BertSelfAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
BertSelfAttention.head_dim |
1 | 0 | 0 |
attr |
BertSelfAttention.num_kv_heads |
1 | 0 | 0 |
attr |
BertSelfAttention.q_size |
1 | 0 | 0 |
attr |
BertSelfAttention.kv_size |
1 | 0 | 0 |
attr |
BertSelfAttention.scaling |
1 | 0 | 0 |
attr |
BertSelfAttention.qkv_proj |
1 | 0 | 0 |
attr |
BertSelfAttention.attn |
1 | 0 | 0 |
meth |
BertSpladeSparseEmbeddingModel.init |
4 | 3 | 0 |
meth |
BertSpladeSparseEmbeddingModel.load_weights |
2 | 1 | 0 |
attr |
BertSpladeSparseEmbeddingModel.mlm_head |
1 | 0 | 0 |
attr |
BertSpladeSparseEmbeddingModel.pooler |
1 | 0 | 0 |
meth |
BertAttention.init |
7 | 6 | 0 |
attr |
BertAttention.self |
1 | 0 | 0 |
attr |
BertAttention.output |
1 | 0 | 0 |
meth |
BertPooler.init |
2 | 1 | 0 |
attr |
BertPooler.dense |
1 | 0 | 0 |
attr |
BertPooler.act_fn |
1 | 0 | 0 |
attr |
BertPooler.head |
1 | 0 | 0 |
attr |
BertPoolingModel.pooler |
1 | 0 | 0 |
meth |
BertLayer.init |
5 | 4 | 0 |
meth |
BertLayer.forward |
2 | 1 | 0 |
attr |
BertLayer.attention |
1 | 0 | 0 |
attr |
BertLayer.intermediate |
1 | 0 | 0 |
attr |
BertLayer.output |
1 | 0 | 0 |
meth |
BertEncoder.init |
3 | 2 | 0 |
attr |
BertEncoder.layer |
1 | 0 | 0 |
meth |
BertModel._load_weights |
2 | 1 | 0 |
attr |
BertModel.config |
1 | 0 | 0 |
attr |
BertModel.embeddings |
1 | 0 | 0 |
attr |
BertModel.encoder |
1 | 0 | 0 |
meth |
BertOutput.init |
6 | 5 | 0 |
attr |
BertOutput.dense |
1 | 0 | 0 |
attr |
BertOutput.LayerNorm |
1 | 0 | 0 |
meth |
BertForSequenceClassification.init |
3 | 2 | 0 |
meth |
BertForSequenceClassification.load_weights |
2 | 1 | 0 |
attr |
BertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
BertForSequenceClassification.bert |
1 | 0 | 0 |
attr |
BertForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
BertForSequenceClassification.pooler |
1 | 0 | 0 |
meth |
BertIntermediate.init |
6 | 5 | 0 |
attr |
BertIntermediate.dense |
1 | 0 | 0 |
attr |
BertIntermediate.intermediate_act_fn |
1 | 0 | 0 |
meth |
BertMLMHead.init |
4 | 3 | 0 |
meth |
BertMLMHead.tie_weights_with_embeddings |
2 | 1 | 0 |
attr |
BertMLMHead.dense |
1 | 0 | 0 |
attr |
BertMLMHead.activation |
1 | 0 | 0 |
attr |
BertMLMHead.layer_norm |
1 | 0 | 0 |
attr |
BertMLMHead.decoder |
1 | 0 | 0 |
vllm.model_executor.models.bert_with_rope (70 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BertWithRopeAttention.init |
8 | 7 | 0 |
attr |
BertWithRopeAttention.hidden_size |
1 | 0 | 0 |
attr |
BertWithRopeAttention.total_num_heads |
1 | 0 | 0 |
attr |
BertWithRopeAttention.num_heads |
1 | 0 | 0 |
attr |
BertWithRopeAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
BertWithRopeAttention.head_dim |
1 | 0 | 0 |
attr |
BertWithRopeAttention.num_kv_heads |
1 | 0 | 0 |
attr |
BertWithRopeAttention.q_size |
1 | 0 | 0 |
attr |
BertWithRopeAttention.kv_size |
1 | 0 | 0 |
attr |
BertWithRopeAttention.scaling |
1 | 0 | 0 |
attr |
BertWithRopeAttention.qkv_proj |
1 | 0 | 0 |
attr |
BertWithRopeAttention.rotary_emb |
1 | 0 | 0 |
attr |
BertWithRopeAttention.attn |
1 | 0 | 0 |
attr |
BertWithRopeAttention.out_proj |
1 | 0 | 0 |
meth |
BertWithRopeMLP.init |
7 | 6 | 0 |
attr |
BertWithRopeMLP.act_fn |
1 | 0 | 0 |
attr |
BertWithRopeMLP.up_proj |
1 | 0 | 0 |
attr |
BertWithRopeMLP.down_proj |
1 | 0 | 0 |
meth |
BertWithRopeEncoder.init |
5 | 4 | 0 |
attr |
BertWithRopeEncoder.layers |
1 | 0 | 0 |
meth |
NomicMoE.init |
8 | 7 | 0 |
meth |
NomicMoE.weight_loader |
4 | 3 | 0 |
attr |
NomicMoE.tp_size |
1 | 0 | 0 |
attr |
NomicMoE.num_total_experts |
1 | 0 | 0 |
attr |
NomicMoE.top_k |
1 | 0 | 0 |
attr |
NomicMoE.hidden_size |
1 | 0 | 0 |
attr |
NomicMoE.total_intermediate_size |
1 | 0 | 0 |
attr |
NomicMoE.intermediate_size |
1 | 0 | 0 |
attr |
NomicMoE.hidden_act |
1 | 0 | 0 |
attr |
NomicMoE.params_dtype |
1 | 0 | 0 |
attr |
NomicMoE.router |
1 | 0 | 0 |
attr |
NomicMoE.w1 |
1 | 0 | 0 |
attr |
NomicMoE.w2 |
1 | 0 | 0 |
attr |
NomicMoE.bias |
1 | 0 | 0 |
meth |
BertWithRopeBlock.init |
8 | 7 | 0 |
meth |
BertWithRopeBlock.forward |
3 | 2 | 0 |
attr |
BertWithRopeBlock.attn |
1 | 0 | 0 |
attr |
BertWithRopeBlock.attn_ln |
1 | 0 | 0 |
attr |
BertWithRopeBlock.mlp_ln |
1 | 0 | 0 |
attr |
BertWithRopeBlock.mlp |
1 | 0 | 0 |
attr |
SnowflakeGteNewModel.hf_to_vllm_mapper |
1 | 0 | 0 |
meth |
JinaRobertaModel.jina_merge_lora_weights |
2 | 1 | 0 |
attr |
JinaRobertaModel.hf_to_vllm_mapper |
1 | 0 | 0 |
meth |
BertWithRopeEmbedding.init |
2 | 1 | 0 |
attr |
BertWithRopeEmbedding.word_embeddings |
1 | 0 | 0 |
attr |
BertWithRopeEmbedding.LayerNorm |
1 | 0 | 0 |
attr |
BertWithRopeEmbedding.token_type_embeddings |
1 | 0 | 0 |
meth |
BertWithRopeGatedMLP.init |
7 | 6 | 0 |
attr |
BertWithRopeGatedMLP.act_fn |
1 | 0 | 0 |
attr |
BertWithRopeGatedMLP.gate_up_proj |
1 | 0 | 0 |
attr |
BertWithRopeGatedMLP.down_proj |
1 | 0 | 0 |
meth |
GteNewForSequenceClassification.init |
3 | 2 | 0 |
meth |
GteNewForSequenceClassification.load_weights |
2 | 1 | 0 |
attr |
GteNewForSequenceClassification.new |
1 | 0 | 0 |
attr |
GteNewForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
GteNewForSequenceClassification.pooler |
1 | 0 | 0 |
meth |
GteNewModel.init |
4 | 2 | 0 |
meth |
GteNewModel.split_up_gate_proj |
2 | 1 | 0 |
meth |
GteNewModel.ignore_unnecessary_layers |
2 | 1 | 0 |
attr |
GteNewModel.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
NomicBertModel.hf_to_vllm_mapper |
1 | 0 | 0 |
meth |
BertWithRope.init |
4 | 3 | 0 |
attr |
BertWithRope.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
BertWithRope.vllm_config |
1 | 0 | 0 |
attr |
BertWithRope.add_pooling_layer |
1 | 0 | 0 |
attr |
BertWithRope.config |
1 | 0 | 0 |
attr |
BertWithRope.embeddings |
1 | 0 | 0 |
attr |
BertWithRope.encoder |
1 | 0 | 0 |
attr |
BertWithRope.pooler |
1 | 0 | 0 |
vllm.model_executor.models.blip (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlipEncoder.forward |
2 | 1 | 0 |
attr |
BlipEncoder.config |
1 | 0 | 0 |
attr |
BlipEncoder.layers |
1 | 0 | 0 |
attr |
BlipMLP.config |
1 | 0 | 0 |
attr |
BlipMLP.activation_fn |
1 | 0 | 0 |
attr |
BlipMLP.fc1 |
1 | 0 | 0 |
attr |
BlipMLP.fc2 |
1 | 0 | 0 |
attr |
BlipEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
BlipEncoderLayer.layer_norm1 |
1 | 0 | 0 |
attr |
BlipEncoderLayer.mlp |
1 | 0 | 0 |
attr |
BlipEncoderLayer.layer_norm2 |
1 | 0 | 0 |
attr |
BlipVisionModel.config |
1 | 0 | 0 |
attr |
BlipVisionModel.embeddings |
1 | 0 | 0 |
attr |
BlipVisionModel.encoder |
1 | 0 | 0 |
attr |
BlipVisionModel.post_layernorm |
1 | 0 | 0 |
meth |
BlipVisionEmbeddings.init |
2 | 1 | 0 |
attr |
BlipVisionEmbeddings.config |
1 | 0 | 0 |
attr |
BlipVisionEmbeddings.embed_dim |
1 | 0 | 0 |
attr |
BlipVisionEmbeddings.image_size |
1 | 0 | 0 |
attr |
BlipVisionEmbeddings.patch_size |
1 | 0 | 0 |
attr |
BlipVisionEmbeddings.class_embedding |
1 | 0 | 0 |
attr |
BlipVisionEmbeddings.patch_embedding |
1 | 0 | 0 |
attr |
BlipVisionEmbeddings.num_patches |
1 | 0 | 0 |
attr |
BlipVisionEmbeddings.num_positions |
1 | 0 | 0 |
attr |
BlipVisionEmbeddings.position_embedding |
1 | 0 | 0 |
meth |
BlipAttention._shape |
4 | 3 | 0 |
meth |
BlipAttention.forward |
2 | 1 | 0 |
attr |
BlipAttention.config |
1 | 0 | 0 |
attr |
BlipAttention.embed_dim |
1 | 0 | 0 |
attr |
BlipAttention.num_heads |
1 | 0 | 0 |
attr |
BlipAttention.head_dim |
1 | 0 | 0 |
attr |
BlipAttention.scale |
1 | 0 | 0 |
attr |
BlipAttention.dropout |
1 | 0 | 0 |
attr |
BlipAttention.qkv |
1 | 0 | 0 |
attr |
BlipAttention.projection |
1 | 0 | 0 |
attr |
BlipAttention.tp_size |
1 | 0 | 0 |
attr |
BlipAttention.num_heads_per_partition |
1 | 0 | 0 |
attr |
BlipAttention.attn |
1 | 0 | 0 |
vllm.model_executor.models.blip2 (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Blip2ProcessingInfo.get_hf_config |
1 | 0 | 0 |
attr |
Blip2QFormerModel.config |
1 | 0 | 0 |
attr |
Blip2QFormerModel.layernorm |
1 | 0 | 0 |
attr |
Blip2QFormerModel.dropout |
1 | 0 | 0 |
attr |
Blip2QFormerModel.encoder |
1 | 0 | 0 |
attr |
Blip2QFormerSelfOutput.dense |
1 | 0 | 0 |
attr |
Blip2QFormerSelfOutput.LayerNorm |
1 | 0 | 0 |
attr |
Blip2QFormerSelfOutput.dropout |
1 | 0 | 0 |
attr |
Blip2QFormerAttention.attention |
1 | 0 | 0 |
attr |
Blip2QFormerAttention.output |
1 | 0 | 0 |
attr |
Blip2QFormerOutput.dense |
1 | 0 | 0 |
attr |
Blip2QFormerOutput.LayerNorm |
1 | 0 | 0 |
attr |
Blip2QFormerOutput.dropout |
1 | 0 | 0 |
attr |
Blip2QFormerIntermediate.dense |
1 | 0 | 0 |
attr |
Blip2QFormerIntermediate.intermediate_act_fn |
1 | 0 | 0 |
meth |
Blip2QFormerMultiHeadAttention.transpose_for_scores |
2 | 0 | 0 |
meth |
Blip2QFormerMultiHeadAttention.forward |
3 | 2 | 0 |
attr |
Blip2QFormerMultiHeadAttention.config |
1 | 0 | 0 |
attr |
Blip2QFormerMultiHeadAttention.num_attention_heads |
1 | 0 | 0 |
attr |
Blip2QFormerMultiHeadAttention.attention_head_size |
1 | 0 | 0 |
attr |
Blip2QFormerMultiHeadAttention.all_head_size |
1 | 0 | 0 |
attr |
Blip2QFormerMultiHeadAttention.scaling |
1 | 0 | 0 |
attr |
Blip2QFormerMultiHeadAttention.query |
1 | 0 | 0 |
attr |
Blip2QFormerMultiHeadAttention.key |
1 | 0 | 0 |
attr |
Blip2QFormerMultiHeadAttention.value |
1 | 0 | 0 |
attr |
Blip2QFormerMultiHeadAttention.position_embedding_type |
1 | 0 | 0 |
attr |
Blip2QFormerMultiHeadAttention.dropout |
1 | 0 | 0 |
attr |
Blip2QFormerEncoder.config |
1 | 0 | 0 |
attr |
Blip2QFormerEncoder.layer |
1 | 0 | 0 |
meth |
Blip2QFormerLayer.forward |
4 | 3 | 0 |
attr |
Blip2QFormerLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
Blip2QFormerLayer.seq_len_dim |
1 | 0 | 0 |
attr |
Blip2QFormerLayer.attention |
1 | 0 | 0 |
attr |
Blip2QFormerLayer.layer_idx |
1 | 0 | 0 |
attr |
Blip2QFormerLayer.intermediate_query |
1 | 0 | 0 |
attr |
Blip2QFormerLayer.output_query |
1 | 0 | 0 |
attr |
Blip2QFormerLayer.crossattention |
1 | 0 | 0 |
attr |
Blip2QFormerLayer.has_cross_attention |
1 | 0 | 0 |
meth |
Blip2ForConditionalGeneration.init |
3 | 2 | 0 |
attr |
Blip2ForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Blip2ForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Blip2ForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Blip2ForConditionalGeneration.vision_model |
1 | 0 | 0 |
attr |
Blip2ForConditionalGeneration.query_tokens |
1 | 0 | 0 |
attr |
Blip2ForConditionalGeneration.qformer |
1 | 0 | 0 |
attr |
Blip2ForConditionalGeneration.language_projection |
1 | 0 | 0 |
attr |
Blip2ForConditionalGeneration.language_model |
1 | 0 | 0 |
vllm.model_executor.models.bloom (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BloomMLP.init |
4 | 3 | 0 |
attr |
BloomMLP.dense_h_to_4h |
1 | 0 | 0 |
attr |
BloomMLP.gelu_impl |
1 | 0 | 0 |
attr |
BloomMLP.dense_4h_to_h |
1 | 0 | 0 |
meth |
BloomAttention.init |
5 | 4 | 0 |
attr |
BloomAttention.hidden_size |
1 | 0 | 0 |
attr |
BloomAttention.total_num_heads |
1 | 0 | 0 |
attr |
BloomAttention.head_dim |
1 | 0 | 0 |
attr |
BloomAttention.num_heads |
1 | 0 | 0 |
attr |
BloomAttention.query_key_value |
1 | 0 | 0 |
attr |
BloomAttention.dense |
1 | 0 | 0 |
attr |
BloomAttention.attn |
1 | 0 | 0 |
meth |
BloomModel.init |
3 | 2 | 0 |
attr |
BloomModel.config |
1 | 0 | 0 |
attr |
BloomModel.embed_dim |
1 | 0 | 0 |
attr |
BloomModel.word_embeddings |
1 | 0 | 0 |
attr |
BloomModel.word_embeddings_layernorm |
1 | 0 | 0 |
attr |
BloomModel.ln_f |
1 | 0 | 0 |
attr |
BloomModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
BloomBlock.init |
5 | 4 | 0 |
attr |
BloomBlock.input_layernorm |
1 | 0 | 0 |
attr |
BloomBlock.self_attention |
1 | 0 | 0 |
attr |
BloomBlock.post_attention_layernorm |
1 | 0 | 0 |
attr |
BloomBlock.mlp |
1 | 0 | 0 |
attr |
BloomBlock.apply_residual_connection_post_layernorm |
1 | 0 | 0 |
meth |
BloomForCausalLM.init |
3 | 2 | 0 |
attr |
BloomForCausalLM.config |
1 | 0 | 0 |
attr |
BloomForCausalLM.quant_config |
1 | 0 | 0 |
attr |
BloomForCausalLM.transformer |
1 | 0 | 0 |
attr |
BloomForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
BloomForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
BloomForCausalLM.lm_head |
1 | 0 | 0 |
vllm.model_executor.models.chameleon (113 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
ChameleonAttention.hidden_size |
1 | 0 | 0 |
attr |
ChameleonAttention.total_num_heads |
1 | 0 | 0 |
attr |
ChameleonAttention.num_heads |
1 | 0 | 0 |
attr |
ChameleonAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
ChameleonAttention.num_kv_heads |
1 | 0 | 0 |
attr |
ChameleonAttention.head_dim |
1 | 0 | 0 |
attr |
ChameleonAttention.q_size |
1 | 0 | 0 |
attr |
ChameleonAttention.kv_size |
1 | 0 | 0 |
attr |
ChameleonAttention.scaling |
1 | 0 | 0 |
attr |
ChameleonAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
ChameleonAttention.qkv_proj |
1 | 0 | 0 |
attr |
ChameleonAttention.o_proj |
1 | 0 | 0 |
attr |
ChameleonAttention.q_norm |
1 | 0 | 0 |
attr |
ChameleonAttention.k_norm |
1 | 0 | 0 |
attr |
ChameleonAttention.rotary_emb |
1 | 0 | 0 |
attr |
ChameleonAttention.attn |
1 | 0 | 0 |
meth |
ChameleonImageVocabularyMapping.init |
2 | 1 | 0 |
prop |
ChameleonImageVocabularyMapping.val2name |
1 | 0 | 0 |
prop |
ChameleonImageVocabularyMapping.image_tokens |
1 | 0 | 0 |
prop |
ChameleonImageVocabularyMapping.bpe2img |
1 | 0 | 0 |
prop |
ChameleonImageVocabularyMapping.img2bpe |
1 | 0 | 0 |
prop |
ChameleonImageVocabularyMapping.bpe2img_search_tensors |
1 | 0 | 0 |
prop |
ChameleonImageVocabularyMapping.img2bpe_mapping_tensor |
1 | 0 | 0 |
attr |
ChameleonImageVocabularyMapping.vocab_map |
1 | 0 | 0 |
attr |
ChameleonImageVocabularyMapping.image_token_id |
1 | 0 | 0 |
meth |
ChameleonVQVAEEncoderResnetBlock.init |
5 | 2 | 0 |
meth |
ChameleonVQVAEEncoderResnetBlock.forward |
2 | 1 | 0 |
attr |
ChameleonVQVAEEncoderResnetBlock.in_channels |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoderResnetBlock.out_channels |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoderResnetBlock.use_conv_shortcut |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoderResnetBlock.norm1 |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoderResnetBlock.conv1 |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoderResnetBlock.norm2 |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoderResnetBlock.dropout |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoderResnetBlock.conv2 |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoderResnetBlock.conv_shortcut |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoderResnetBlock.nin_shortcut |
1 | 0 | 0 |
meth |
ChameleonVQVAEEncoderConvDownsample.init |
2 | 1 | 0 |
meth |
ChameleonVQVAEEncoderConvDownsample.forward |
2 | 1 | 0 |
attr |
ChameleonVQVAEEncoderConvDownsample.conv |
1 | 0 | 0 |
meth |
ChameleonVQVAEVectorQuantizer.init |
2 | 1 | 0 |
meth |
ChameleonVQVAEVectorQuantizer.forward |
2 | 1 | 0 |
attr |
ChameleonVQVAEVectorQuantizer.num_embeddings |
1 | 0 | 0 |
attr |
ChameleonVQVAEVectorQuantizer.embedding_dim |
1 | 0 | 0 |
attr |
ChameleonVQVAEVectorQuantizer.beta |
1 | 0 | 0 |
attr |
ChameleonVQVAEVectorQuantizer.embedding |
1 | 0 | 0 |
attr |
ChameleonVQVAEVectorQuantizer.re_embed |
1 | 0 | 0 |
attr |
ChameleonDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
ChameleonDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
ChameleonDecoderLayer.mlp |
1 | 0 | 0 |
attr |
ChameleonDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
ChameleonDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
ChameleonSwinDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
ChameleonSwinDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
ChameleonSwinDecoderLayer.mlp |
1 | 0 | 0 |
attr |
ChameleonSwinDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
ChameleonSwinDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
ChameleonMLP.forward |
2 | 0 | 0 |
attr |
ChameleonMLP.gate_up_proj |
1 | 0 | 0 |
attr |
ChameleonMLP.down_proj |
1 | 0 | 0 |
attr |
ChameleonMLP.act_fn |
1 | 0 | 0 |
meth |
ChameleonVQVAEEncoder.init |
2 | 1 | 0 |
meth |
ChameleonVQVAEEncoder.forward |
2 | 1 | 0 |
attr |
ChameleonVQVAEEncoder.num_resolutions |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoder.num_res_blocks |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoder.conv_in |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoder.in_channel_multiplier |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoder.down |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoder.mid |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoder.norm_out |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoder.conv_out |
1 | 0 | 0 |
meth |
ChameleonModel.init |
3 | 2 | 0 |
attr |
ChameleonModel.config |
1 | 0 | 0 |
attr |
ChameleonModel.vocab_size |
1 | 0 | 0 |
attr |
ChameleonModel.embed_tokens |
1 | 0 | 0 |
attr |
ChameleonModel.vocabulary_mapping |
1 | 0 | 0 |
attr |
ChameleonModel.norm |
1 | 0 | 0 |
attr |
ChameleonModel.vqmodel |
1 | 0 | 0 |
attr |
ChameleonModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
ChameleonLayerNorm.init |
4 | 0 | 0 |
meth |
ChameleonLayerNorm.forward |
2 | 0 | 0 |
attr |
ChameleonLayerNorm.normalized_shape |
1 | 0 | 0 |
meth |
ChameleonVQVAE.init |
2 | 1 | 0 |
attr |
ChameleonVQVAE.encoder |
1 | 0 | 0 |
attr |
ChameleonVQVAE.quantize |
1 | 0 | 0 |
attr |
ChameleonVQVAE.quant_conv |
1 | 0 | 0 |
attr |
ChameleonVQVAE.post_quant_conv |
1 | 0 | 0 |
meth |
ChameleonVQVAEEncoderAttnBlock.init |
2 | 1 | 0 |
meth |
ChameleonVQVAEEncoderAttnBlock.forward |
2 | 1 | 0 |
attr |
ChameleonVQVAEEncoderAttnBlock.in_channels |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoderAttnBlock.norm |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoderAttnBlock.q |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoderAttnBlock.k |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoderAttnBlock.v |
1 | 0 | 0 |
attr |
ChameleonVQVAEEncoderAttnBlock.proj_out |
1 | 0 | 0 |
meth |
ChameleonForConditionalGeneration.init |
3 | 2 | 0 |
meth |
ChameleonForConditionalGeneration.forward |
6 | 5 | 0 |
attr |
ChameleonForConditionalGeneration.config |
1 | 0 | 0 |
attr |
ChameleonForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
ChameleonForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
ChameleonForConditionalGeneration.logits_processor |
1 | 0 | 0 |
attr |
ChameleonForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
ChameleonForConditionalGeneration.model |
1 | 0 | 0 |
meth |
ChameleonProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
ChameleonProcessingInfo.get_hf_processor |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.chatglm (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChatGLMForCausalLM.init |
3 | 2 | 0 |
meth |
ChatGLMModel.init |
3 | 2 | 0 |
attr |
ChatGLMModel.config |
1 | 0 | 0 |
attr |
ChatGLMModel.embedding |
1 | 0 | 0 |
attr |
ChatGLMModel.num_layers |
1 | 0 | 0 |
attr |
ChatGLMModel.multi_query_group_num |
1 | 0 | 0 |
attr |
ChatGLMModel.kv_channels |
1 | 0 | 0 |
attr |
ChatGLMModel.encoder |
1 | 0 | 0 |
attr |
ChatGLMModel.output_layer |
1 | 0 | 0 |
attr |
ChatGLMModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
ChatGLMBaseModel.load_weights |
2 | 1 | 0 |
attr |
ChatGLMBaseModel.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
ChatGLMBaseModel.config |
1 | 0 | 0 |
attr |
ChatGLMBaseModel.multimodal_config |
1 | 0 | 0 |
attr |
ChatGLMBaseModel.quant_config |
1 | 0 | 0 |
attr |
ChatGLMBaseModel.max_position_embeddings |
1 | 0 | 0 |
attr |
ChatGLMBaseModel.transformer |
1 | 0 | 0 |
attr |
ChatGLMBaseModel.lm_head |
1 | 0 | 0 |
attr |
ChatGLMBaseModel.logits_processor |
1 | 0 | 0 |
attr |
ChatGLMBaseModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
GLMMLP.init |
4 | 3 | 0 |
meth |
GLMMLP.forward |
2 | 0 | 0 |
attr |
GLMMLP.add_bias |
1 | 0 | 0 |
attr |
GLMMLP.dense_h_to_4h |
1 | 0 | 0 |
attr |
GLMMLP.activation_func |
1 | 0 | 0 |
attr |
GLMMLP.dense_4h_to_h |
1 | 0 | 0 |
meth |
GLMAttention.init |
5 | 4 | 0 |
attr |
GLMAttention.hidden_size |
1 | 0 | 0 |
attr |
GLMAttention.total_num_heads |
1 | 0 | 0 |
attr |
GLMAttention.num_heads |
1 | 0 | 0 |
attr |
GLMAttention.multi_query_attention |
1 | 0 | 0 |
attr |
GLMAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
GLMAttention.num_kv_heads |
1 | 0 | 0 |
attr |
GLMAttention.head_dim |
1 | 0 | 0 |
attr |
GLMAttention.q_size |
1 | 0 | 0 |
attr |
GLMAttention.kv_size |
1 | 0 | 0 |
attr |
GLMAttention.scaling |
1 | 0 | 0 |
attr |
GLMAttention.query_key_value |
1 | 0 | 0 |
attr |
GLMAttention.dense |
1 | 0 | 0 |
attr |
GLMAttention.rotary_emb |
1 | 0 | 0 |
attr |
GLMAttention.attn |
1 | 0 | 0 |
meth |
GLMTransformer.init |
5 | 4 | 0 |
attr |
GLMTransformer.post_layer_norm |
1 | 0 | 0 |
attr |
GLMTransformer.num_layers |
1 | 0 | 0 |
attr |
GLMTransformer.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
GLMTransformer.final_layernorm |
1 | 0 | 0 |
meth |
GLMBlock.init |
5 | 4 | 0 |
attr |
GLMBlock.apply_residual_connection_post_layernorm |
1 | 0 | 0 |
attr |
GLMBlock.fp32_residual_connection |
1 | 0 | 0 |
attr |
GLMBlock.input_layernorm |
1 | 0 | 0 |
attr |
GLMBlock.self_attention |
1 | 0 | 0 |
attr |
GLMBlock.hidden_dropout |
1 | 0 | 0 |
attr |
GLMBlock.post_attention_layernorm |
1 | 0 | 0 |
attr |
GLMBlock.mlp |
1 | 0 | 0 |
vllm.model_executor.models.clip (64 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CLIPTextEmbeddings.init |
2 | 1 | 0 |
attr |
CLIPTextEmbeddings.token_embedding |
1 | 0 | 0 |
attr |
CLIPTextEmbeddings.position_embedding |
1 | 0 | 0 |
attr |
CLIPEncoder.config |
1 | 0 | 0 |
attr |
CLIPEncoder.layers |
1 | 0 | 0 |
prop |
CLIPVisionTransformer.dtype |
1 | 0 | 0 |
prop |
CLIPVisionTransformer.device |
1 | 0 | 0 |
attr |
CLIPVisionTransformer.config |
1 | 0 | 0 |
attr |
CLIPVisionTransformer.embeddings |
1 | 0 | 0 |
attr |
CLIPVisionTransformer.pre_layrnorm |
1 | 0 | 0 |
attr |
CLIPVisionTransformer.encoder |
1 | 0 | 0 |
attr |
CLIPVisionTransformer.post_layernorm |
1 | 0 | 0 |
attr |
CLIPEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
CLIPEncoderLayer.layer_norm1 |
1 | 0 | 0 |
attr |
CLIPEncoderLayer.mlp |
1 | 0 | 0 |
attr |
CLIPEncoderLayer.layer_norm2 |
1 | 0 | 0 |
attr |
CLIPTextTransformer.config |
1 | 0 | 0 |
attr |
CLIPTextTransformer.embeddings |
1 | 0 | 0 |
attr |
CLIPTextTransformer.encoder |
1 | 0 | 0 |
attr |
CLIPTextTransformer.final_layer_norm |
1 | 0 | 0 |
attr |
CLIPMLP.config |
1 | 0 | 0 |
attr |
CLIPMLP.activation_fn |
1 | 0 | 0 |
attr |
CLIPMLP.fc1 |
1 | 0 | 0 |
attr |
CLIPMLP.fc2 |
1 | 0 | 0 |
meth |
CLIPProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
CLIPProcessingInfo.get_vision_encoder_info |
1 | 0 | 0 |
meth |
CLIPProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
CLIPEmbeddingModel.init |
3 | 2 | 0 |
meth |
CLIPEmbeddingModel.load_weights |
2 | 1 | 0 |
attr |
CLIPEmbeddingModel.config |
1 | 0 | 0 |
attr |
CLIPEmbeddingModel.multimodal_config |
1 | 0 | 0 |
attr |
CLIPEmbeddingModel.projection_dim |
1 | 0 | 0 |
attr |
CLIPEmbeddingModel.text_embed_dim |
1 | 0 | 0 |
attr |
CLIPEmbeddingModel.vision_embed_dim |
1 | 0 | 0 |
attr |
CLIPEmbeddingModel.pooler_config |
1 | 0 | 0 |
attr |
CLIPEmbeddingModel.pooler |
1 | 0 | 0 |
attr |
CLIPEmbeddingModel.text_model |
1 | 0 | 0 |
attr |
CLIPEmbeddingModel.text_projection |
1 | 0 | 0 |
attr |
CLIPEmbeddingModel.vision_model |
1 | 0 | 0 |
attr |
CLIPEmbeddingModel.visual_projection |
1 | 0 | 0 |
meth |
CLIPVisionEmbeddings.init |
2 | 1 | 0 |
attr |
CLIPVisionEmbeddings.config |
1 | 0 | 0 |
attr |
CLIPVisionEmbeddings.embed_dim |
1 | 0 | 0 |
attr |
CLIPVisionEmbeddings.image_size |
1 | 0 | 0 |
attr |
CLIPVisionEmbeddings.patch_size |
1 | 0 | 0 |
attr |
CLIPVisionEmbeddings.class_embedding |
1 | 0 | 0 |
attr |
CLIPVisionEmbeddings.patch_embedding |
1 | 0 | 0 |
attr |
CLIPVisionEmbeddings.num_patches |
1 | 0 | 0 |
attr |
CLIPVisionEmbeddings.num_positions |
1 | 0 | 0 |
attr |
CLIPVisionEmbeddings.position_embedding |
1 | 0 | 0 |
prop |
CLIPVisionModel.dtype |
1 | 0 | 0 |
prop |
CLIPVisionModel.device |
1 | 0 | 0 |
attr |
CLIPVisionModel.vision_model |
1 | 0 | 0 |
meth |
CLIPAttention.forward |
2 | 1 | 0 |
attr |
CLIPAttention.config |
1 | 0 | 0 |
attr |
CLIPAttention.embed_dim |
1 | 0 | 0 |
attr |
CLIPAttention.num_heads |
1 | 0 | 0 |
attr |
CLIPAttention.head_dim |
1 | 0 | 0 |
attr |
CLIPAttention.scale |
1 | 0 | 0 |
attr |
CLIPAttention.qkv_proj |
1 | 0 | 0 |
attr |
CLIPAttention.out_proj |
1 | 0 | 0 |
attr |
CLIPAttention.tp_size |
1 | 0 | 0 |
attr |
CLIPAttention.num_heads_per_partition |
1 | 0 | 0 |
attr |
CLIPAttention.attn |
1 | 0 | 0 |
vllm.model_executor.models.cohere2_vision (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Cohere2VisionForConditionalGeneration.init |
3 | 2 | 0 |
meth |
Cohere2VisionForConditionalGeneration._process_image_input |
3 | 2 | 0 |
meth |
Cohere2VisionForConditionalGeneration._patch_quant_config |
3 | 2 | 0 |
prop |
Cohere2VisionForConditionalGeneration.dtype |
1 | 0 | 0 |
attr |
Cohere2VisionForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Cohere2VisionForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Cohere2VisionForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
Cohere2VisionForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Cohere2VisionForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
Cohere2VisionForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
Cohere2VisionForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
Cohere2VisionProcessingInfo.get_image_processor |
2 | 1 | 0 |
meth |
Cohere2VisionMultiModalProjector.init |
3 | 2 | 0 |
meth |
Cohere2VisionMultiModalProjector.forward |
2 | 0 | 0 |
attr |
Cohere2VisionMultiModalProjector.downsample_factor |
1 | 0 | 0 |
attr |
Cohere2VisionMultiModalProjector.intermediate_size |
1 | 0 | 0 |
attr |
Cohere2VisionMultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
Cohere2VisionMultiModalProjector.act |
1 | 0 | 0 |
attr |
Cohere2VisionMultiModalProjector.linear_2 |
1 | 0 | 0 |
vllm.model_executor.models.colbert (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColBERTModel.init |
3 | 2 | 0 |
meth |
ColBERTModel.load_weights |
2 | 1 | 0 |
meth |
ColBERTJinaRobertaModel.init |
3 | 2 | 0 |
meth |
ColBERTJinaRobertaModel.forward |
5 | 4 | 0 |
meth |
ColBERTJinaRobertaModel.load_weights |
2 | 1 | 0 |
attr |
ColBERTJinaRobertaModel.model |
1 | 0 | 0 |
attr |
ColBERTJinaRobertaModel.pooler |
1 | 0 | 0 |
meth |
ColBERTMixin.get_colbert_dim_from_config |
2 | 1 | 0 |
meth |
ColBERTModernBertModel.init |
3 | 2 | 0 |
meth |
ColBERTModernBertModel.forward |
5 | 4 | 0 |
meth |
ColBERTModernBertModel.load_weights |
2 | 1 | 0 |
attr |
ColBERTModernBertModel.model |
1 | 0 | 0 |
attr |
ColBERTModernBertModel.pooler |
1 | 0 | 0 |
vllm.model_executor.models.colmodernvbert (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColModernVBertConnector.init |
2 | 1 | 0 |
attr |
ColModernVBertConnector.pixel_shuffle_factor |
1 | 0 | 0 |
attr |
ColModernVBertConnector.proj |
1 | 0 | 0 |
meth |
ColModernVBertForRetrieval.init |
3 | 2 | 0 |
attr |
ColModernVBertForRetrieval.config |
1 | 0 | 0 |
attr |
ColModernVBertForRetrieval.vision_model |
1 | 0 | 0 |
attr |
ColModernVBertForRetrieval.connector |
1 | 0 | 0 |
attr |
ColModernVBertForRetrieval.text_embeddings |
1 | 0 | 0 |
attr |
ColModernVBertForRetrieval.text_layers |
1 | 0 | 0 |
attr |
ColModernVBertForRetrieval.text_final_norm |
1 | 0 | 0 |
attr |
ColModernVBertForRetrieval.custom_text_proj |
1 | 0 | 0 |
attr |
ColModernVBertForRetrieval.pooler |
1 | 0 | 0 |
attr |
ColModernVBertForRetrieval.hf_to_vllm_mapper |
1 | 0 | 0 |
vllm.model_executor.models.colqwen3 (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColQwen3ProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
ColQwen3ProcessingInfo.get_video_processor |
2 | 1 | 0 |
meth |
ColQwen3ProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
ColQwen3Model.init |
3 | 2 | 0 |
meth |
ColQwen3Model.forward |
6 | 5 | 0 |
attr |
ColQwen3Model.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
ColQwen3Model.pooler |
1 | 0 | 0 |
attr |
ColQwen3Model.custom_text_proj |
1 | 0 | 0 |
vllm.model_executor.models.commandr (64 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayerNorm.init |
3 | 0 | 0 |
meth |
LayerNorm.forward |
3 | 0 | 0 |
attr |
LayerNorm.weight |
1 | 0 | 0 |
attr |
LayerNorm.variance_epsilon |
1 | 0 | 0 |
meth |
CohereDecoderLayer.init |
5 | 4 | 0 |
attr |
CohereDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
CohereDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
CohereDecoderLayer.mlp |
1 | 0 | 0 |
attr |
CohereDecoderLayer.input_layernorm |
1 | 0 | 0 |
meth |
CohereForCausalLM.init |
3 | 2 | 0 |
attr |
CohereForCausalLM.config |
1 | 0 | 0 |
attr |
CohereForCausalLM.quant_config |
1 | 0 | 0 |
attr |
CohereForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
CohereForCausalLM.model |
1 | 0 | 0 |
attr |
CohereForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
func |
layer_norm_func |
4 | 0 | 0 |
meth |
CohereModel.init |
3 | 2 | 0 |
attr |
CohereModel.quant_config |
1 | 0 | 0 |
attr |
CohereModel.config |
1 | 0 | 0 |
attr |
CohereModel.vocab_size |
1 | 0 | 0 |
attr |
CohereModel.embed_tokens |
1 | 0 | 0 |
attr |
CohereModel.norm |
1 | 0 | 0 |
attr |
CohereModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
CohereMLP.init |
4 | 3 | 0 |
meth |
CohereMLP.forward |
2 | 0 | 0 |
attr |
CohereMLP.config |
1 | 0 | 0 |
attr |
CohereMLP.hidden_size |
1 | 0 | 0 |
attr |
CohereMLP.intermediate_size |
1 | 0 | 0 |
attr |
CohereMLP.gate_up_proj |
1 | 0 | 0 |
attr |
CohereMLP.down_proj |
1 | 0 | 0 |
attr |
CohereMLP.act_fn |
1 | 0 | 0 |
meth |
CohereAttention.init |
5 | 4 | 0 |
meth |
CohereAttention._apply_qk_norm |
3 | 0 | 0 |
attr |
CohereAttention.config |
1 | 0 | 0 |
attr |
CohereAttention.attention_dropout |
1 | 0 | 0 |
attr |
CohereAttention.hidden_size |
1 | 0 | 0 |
attr |
CohereAttention.total_num_heads |
1 | 0 | 0 |
attr |
CohereAttention.num_heads |
1 | 0 | 0 |
attr |
CohereAttention.head_dim |
1 | 0 | 0 |
attr |
CohereAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
CohereAttention.num_kv_heads |
1 | 0 | 0 |
attr |
CohereAttention.q_size |
1 | 0 | 0 |
attr |
CohereAttention.kv_size |
1 | 0 | 0 |
attr |
CohereAttention.scaling |
1 | 0 | 0 |
attr |
CohereAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
CohereAttention.use_qk_norm |
1 | 0 | 0 |
attr |
CohereAttention.qkv_proj |
1 | 0 | 0 |
attr |
CohereAttention.o_proj |
1 | 0 | 0 |
attr |
CohereAttention.rotary_emb |
1 | 0 | 0 |
attr |
CohereAttention.v1 |
1 | 0 | 0 |
attr |
CohereAttention.sliding_window |
1 | 0 | 0 |
attr |
CohereAttention.attn |
1 | 0 | 0 |
attr |
CohereAttention.q_norm |
1 | 0 | 0 |
attr |
CohereAttention.k_norm |
1 | 0 | 0 |
vllm.model_executor.models.config (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.dbrx (52 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DbrxMoE.init |
5 | 4 | 0 |
attr |
DbrxMoE.d_model |
1 | 0 | 0 |
attr |
DbrxMoE.params_dtype |
1 | 0 | 0 |
attr |
DbrxMoE.router |
1 | 0 | 0 |
attr |
DbrxMoE.experts |
1 | 0 | 0 |
meth |
DbrxFusedNormAttention.init |
5 | 4 | 0 |
attr |
DbrxFusedNormAttention.d_model |
1 | 0 | 0 |
attr |
DbrxFusedNormAttention.attn |
1 | 0 | 0 |
attr |
DbrxFusedNormAttention.norm_1 |
1 | 0 | 0 |
attr |
DbrxFusedNormAttention.norm_2 |
1 | 0 | 0 |
meth |
DbrxAttention.init |
5 | 4 | 0 |
attr |
DbrxAttention.d_model |
1 | 0 | 0 |
attr |
DbrxAttention.total_num_heads |
1 | 0 | 0 |
attr |
DbrxAttention.head_dim |
1 | 0 | 0 |
attr |
DbrxAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
DbrxAttention.clip_qkv |
1 | 0 | 0 |
attr |
DbrxAttention.max_position |
1 | 0 | 0 |
attr |
DbrxAttention.Wqkv |
1 | 0 | 0 |
attr |
DbrxAttention.out_proj |
1 | 0 | 0 |
attr |
DbrxAttention.rotary_emb |
1 | 0 | 0 |
attr |
DbrxAttention.tp_size |
1 | 0 | 0 |
attr |
DbrxAttention.num_heads |
1 | 0 | 0 |
attr |
DbrxAttention.num_kv_heads |
1 | 0 | 0 |
attr |
DbrxAttention.q_size |
1 | 0 | 0 |
attr |
DbrxAttention.kv_size |
1 | 0 | 0 |
attr |
DbrxAttention.scaling |
1 | 0 | 0 |
attr |
DbrxAttention.attn |
1 | 0 | 0 |
meth |
DbrxExperts.init |
5 | 4 | 0 |
meth |
DbrxExperts.weight_loader |
5 | 4 | 0 |
attr |
DbrxExperts.config |
1 | 0 | 0 |
attr |
DbrxExperts.d_model |
1 | 0 | 0 |
attr |
DbrxExperts.intermediate_size |
1 | 0 | 0 |
meth |
DbrxRouter.init |
3 | 2 | 0 |
attr |
DbrxRouter.tp_size |
1 | 0 | 0 |
attr |
DbrxRouter.num_total_experts |
1 | 0 | 0 |
attr |
DbrxRouter.d_model |
1 | 0 | 0 |
attr |
DbrxRouter.layer |
1 | 0 | 0 |
meth |
DbrxModel.init |
3 | 2 | 0 |
attr |
DbrxModel.quant_config |
1 | 0 | 0 |
attr |
DbrxModel.wte |
1 | 0 | 0 |
attr |
DbrxModel.norm_f |
1 | 0 | 0 |
attr |
DbrxModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
DbrxBlock.init |
5 | 4 | 0 |
attr |
DbrxBlock.norm_attn_norm |
1 | 0 | 0 |
attr |
DbrxBlock.ffn |
1 | 0 | 0 |
meth |
DbrxForCausalLM.init |
3 | 2 | 0 |
attr |
DbrxForCausalLM.config |
1 | 0 | 0 |
attr |
DbrxForCausalLM.quant_config |
1 | 0 | 0 |
attr |
DbrxForCausalLM.transformer |
1 | 0 | 0 |
attr |
DbrxForCausalLM.lm_head |
1 | 0 | 0 |
attr |
DbrxForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
DbrxForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.deepencoder (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
build_sam_vit_b |
1 | 0 | 0 |
attr |
MLPBlock.lin1 |
1 | 0 | 0 |
attr |
MLPBlock.lin2 |
1 | 0 | 0 |
attr |
MLPBlock.act |
1 | 0 | 0 |
attr |
RelPosAttention.num_heads |
1 | 0 | 0 |
attr |
RelPosAttention.scale |
1 | 0 | 0 |
attr |
RelPosAttention.qkv |
1 | 0 | 0 |
attr |
RelPosAttention.proj |
1 | 0 | 0 |
attr |
RelPosAttention.use_rel_pos |
1 | 0 | 0 |
attr |
RelPosAttention.rel_pos_h |
1 | 0 | 0 |
attr |
RelPosAttention.rel_pos_w |
1 | 0 | 0 |
meth |
DeepCLIPVisionEmbeddings.get_abs_pos |
3 | 2 | 0 |
attr |
LayerNorm2d.weight |
1 | 0 | 0 |
attr |
LayerNorm2d.bias |
1 | 0 | 0 |
attr |
LayerNorm2d.eps |
1 | 0 | 0 |
prop |
DeepCLIPVisionTransformer.dtype |
1 | 0 | 0 |
prop |
DeepCLIPVisionTransformer.device |
1 | 0 | 0 |
attr |
DeepCLIPVisionTransformer.config |
1 | 0 | 0 |
attr |
DeepCLIPVisionTransformer.embeddings |
1 | 0 | 0 |
attr |
DeepCLIPVisionTransformer.pre_layrnorm |
1 | 0 | 0 |
attr |
DeepCLIPVisionTransformer.transformer |
1 | 0 | 0 |
meth |
ImageEncoderViT.get_abs_pos |
3 | 2 | 0 |
attr |
ImageEncoderViT.img_size |
1 | 0 | 0 |
attr |
ImageEncoderViT.patch_embed |
1 | 0 | 0 |
attr |
ImageEncoderViT.blocks |
1 | 0 | 0 |
attr |
ImageEncoderViT.neck |
1 | 0 | 0 |
attr |
ImageEncoderViT.net_2 |
1 | 0 | 0 |
attr |
ImageEncoderViT.net_3 |
1 | 0 | 0 |
attr |
Block.norm1 |
1 | 0 | 0 |
attr |
Block.attn |
1 | 0 | 0 |
attr |
Block.norm2 |
1 | 0 | 0 |
attr |
Block.mlp |
1 | 0 | 0 |
attr |
Block.window_size |
1 | 0 | 0 |
attr |
PatchEmbed.proj |
1 | 0 | 0 |
vllm.model_executor.models.deepencoder2 (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2Decoder2Encoder.init |
6 | 5 | 0 |
attr |
Qwen2Decoder2Encoder.model |
1 | 0 | 0 |
attr |
Qwen2Decoder2Encoder.query_768 |
1 | 0 | 0 |
attr |
Qwen2Decoder2Encoder.query_1024 |
1 | 0 | 0 |
func |
build_qwen2_decoder_as_encoder |
6 | 0 | 0 |
meth |
CustomQwen2Decoder.init |
14 | 13 | 0 |
meth |
CustomQwen2Decoder._create_custom_model |
3 | 0 | 0 |
meth |
CustomQwen2Decoder.forward |
5 | 3 | 0 |
attr |
CustomQwen2Decoder.model |
1 | 0 | 0 |
vllm.model_executor.models.deepseek_eagle (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EagleDeepseekV3ForCausalLM.init |
3 | 2 | 0 |
meth |
EagleDeepseekV3ForCausalLM.load_weights |
2 | 1 | 0 |
attr |
EagleDeepseekV3ForCausalLM.config |
1 | 0 | 0 |
attr |
EagleDeepseekV3ForCausalLM.model |
1 | 0 | 0 |
attr |
EagleDeepseekV3ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
EagleDeepseekV3ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
EagleDeepseekV3ForCausalLM.num_moe_layers |
1 | 0 | 0 |
attr |
DeepseekV2Model.config |
1 | 0 | 0 |
attr |
DeepseekV2Model.vocab_size |
1 | 0 | 0 |
attr |
DeepseekV2Model.embed_tokens |
1 | 0 | 0 |
attr |
DeepseekV2Model.layers |
1 | 0 | 0 |
attr |
DeepseekV2Model.fc |
1 | 0 | 0 |
attr |
DeepseekV2Model.enorm |
1 | 0 | 0 |
attr |
DeepseekV2Model.hnorm |
1 | 0 | 0 |
attr |
DeepseekV2Model.norm |
1 | 0 | 0 |
vllm.model_executor.models.deepseek_mtp (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepSeekMultiTokenPredictor.init |
3 | 2 | 0 |
attr |
DeepSeekMultiTokenPredictor.mtp_start_layer_idx |
1 | 0 | 0 |
attr |
DeepSeekMultiTokenPredictor.num_mtp_layers |
1 | 0 | 0 |
attr |
DeepSeekMultiTokenPredictor.layers |
1 | 0 | 0 |
attr |
DeepSeekMultiTokenPredictor.embed_tokens |
1 | 0 | 0 |
attr |
DeepSeekMultiTokenPredictor.logits_processor |
1 | 0 | 0 |
attr |
SharedHead.norm |
1 | 0 | 0 |
attr |
SharedHead.head |
1 | 0 | 0 |
attr |
DeepSeekMultiTokenPredictorLayer.config |
1 | 0 | 0 |
attr |
DeepSeekMultiTokenPredictorLayer.enorm |
1 | 0 | 0 |
attr |
DeepSeekMultiTokenPredictorLayer.hnorm |
1 | 0 | 0 |
attr |
DeepSeekMultiTokenPredictorLayer.eh_proj |
1 | 0 | 0 |
attr |
DeepSeekMultiTokenPredictorLayer.device |
1 | 0 | 0 |
attr |
DeepSeekMultiTokenPredictorLayer.is_v32 |
1 | 0 | 0 |
attr |
DeepSeekMultiTokenPredictorLayer.shared_head |
1 | 0 | 0 |
attr |
DeepSeekMultiTokenPredictorLayer.mtp_block |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
DeepSeekMTP.init |
3 | 2 | 0 |
meth |
DeepSeekMTP.set_moe_parameters |
1 | 0 | 0 |
attr |
DeepSeekMTP.config |
1 | 0 | 0 |
attr |
DeepSeekMTP.model |
1 | 0 | 0 |
vllm.model_executor.models.deepseek_ocr (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekOCRProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
DeepseekOCRProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
DeepseekOCRForCausalLM.init |
3 | 2 | 0 |
meth |
DeepseekOCRForCausalLM.forward |
6 | 5 | 0 |
attr |
DeepseekOCRForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.config |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.multimodal_config |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.vision_config |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.projector_config |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.text_config |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.image_token_id |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.sam_model |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.vision_model |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.projector |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.tile_tag |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.global_view_pos |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.language_model |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.image_newline |
1 | 0 | 0 |
attr |
DeepseekOCRForCausalLM.view_seperator |
1 | 0 | 0 |
meth |
NGramPerReqLogitsProcessor.validate_params |
2 | 1 | 0 |
meth |
NoRepeatNGramLogitsProcessor.init |
4 | 3 | 0 |
attr |
NoRepeatNGramLogitsProcessor.ngram_size |
1 | 0 | 0 |
attr |
NoRepeatNGramLogitsProcessor.window_size |
1 | 0 | 0 |
attr |
NoRepeatNGramLogitsProcessor.whitelist_token_ids |
1 | 0 | 0 |
vllm.model_executor.models.deepseek_ocr2 (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekOCR2ForCausalLM.init |
3 | 2 | 0 |
meth |
DeepseekOCR2ForCausalLM.forward |
6 | 5 | 0 |
attr |
DeepseekOCR2ForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
DeepseekOCR2ForCausalLM.config |
1 | 0 | 0 |
attr |
DeepseekOCR2ForCausalLM.multimodal_config |
1 | 0 | 0 |
attr |
DeepseekOCR2ForCausalLM.vision_config |
1 | 0 | 0 |
attr |
DeepseekOCR2ForCausalLM.projector_config |
1 | 0 | 0 |
attr |
DeepseekOCR2ForCausalLM.text_config |
1 | 0 | 0 |
attr |
DeepseekOCR2ForCausalLM.image_token_id |
1 | 0 | 0 |
attr |
DeepseekOCR2ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
DeepseekOCR2ForCausalLM.sam_model |
1 | 0 | 0 |
attr |
DeepseekOCR2ForCausalLM.qwen2_model |
1 | 0 | 0 |
attr |
DeepseekOCR2ForCausalLM.projector |
1 | 0 | 0 |
attr |
DeepseekOCR2ForCausalLM.tile_tag |
1 | 0 | 0 |
attr |
DeepseekOCR2ForCausalLM.global_view_pos |
1 | 0 | 0 |
attr |
DeepseekOCR2ForCausalLM.language_model |
1 | 0 | 0 |
attr |
DeepseekOCR2ForCausalLM.view_seperator |
1 | 0 | 0 |
meth |
DeepseekOCR2ProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
DeepseekOCR2ProcessingInfo.get_hf_processor |
2 | 1 | 0 |
vllm.model_executor.models.deepseek_v2 (147 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekAttention.init |
10 | 9 | 0 |
attr |
DeepseekAttention.hidden_size |
1 | 0 | 0 |
attr |
DeepseekAttention.total_num_heads |
1 | 0 | 0 |
attr |
DeepseekAttention.num_heads |
1 | 0 | 0 |
attr |
DeepseekAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
DeepseekAttention.num_kv_heads |
1 | 0 | 0 |
attr |
DeepseekAttention.head_dim |
1 | 0 | 0 |
attr |
DeepseekAttention.q_size |
1 | 0 | 0 |
attr |
DeepseekAttention.kv_size |
1 | 0 | 0 |
attr |
DeepseekAttention.scaling |
1 | 0 | 0 |
attr |
DeepseekAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
DeepseekAttention.qkv_proj |
1 | 0 | 0 |
attr |
DeepseekAttention.o_proj |
1 | 0 | 0 |
attr |
DeepseekAttention.rotary_emb |
1 | 0 | 0 |
attr |
DeepseekAttention.attn |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
DeepseekV32IndexerCache.init |
5 | 4 | 0 |
meth |
DeepseekV32IndexerCache.forward |
1 | 0 | 0 |
attr |
DeepseekV32IndexerCache.kv_cache |
1 | 0 | 0 |
attr |
DeepseekV32IndexerCache.head_dim |
1 | 0 | 0 |
attr |
DeepseekV32IndexerCache.prefix |
1 | 0 | 0 |
attr |
DeepseekV32IndexerCache.cache_config |
1 | 0 | 0 |
attr |
DeepseekV32IndexerCache.dtype |
1 | 0 | 0 |
attr |
DeepseekV2DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
DeepseekV2DecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
DeepseekV2DecoderLayer.use_mha |
1 | 0 | 0 |
attr |
DeepseekV2DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
DeepseekV2DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
DeepseekV2DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
DeepseekV2DecoderLayer.routed_scaling_factor |
1 | 0 | 0 |
attr |
DeepseekV2DecoderLayer.mlp |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.hidden_size |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.qk_nope_head_dim |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.qk_rope_head_dim |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.qk_head_dim |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.v_head_dim |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.q_lora_rank |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.kv_lora_rank |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.num_heads |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.num_local_heads |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.scaling |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.kv_a_layernorm |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.kv_b_proj |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.o_proj |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.rotary_emb |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.is_v32 |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.mla_attn |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.fused_qkv_a_proj |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.q_a_layernorm |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.q_b_proj |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.indexer_rope_emb |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.indexer |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.kv_a_proj_with_mqa |
1 | 0 | 0 |
attr |
DeepseekV2MLAAttention.q_proj |
1 | 0 | 0 |
meth |
DeepSeekV2FusedQkvAProj.init |
5 | 4 | 0 |
meth |
DeepSeekV2FusedQkvAProj.forward |
2 | 1 | 0 |
meth |
DeepseekV2MLP.init |
8 | 7 | 0 |
meth |
DeepseekV2MLP.forward |
2 | 0 | 0 |
attr |
DeepseekV2MLP.gate_up_proj |
1 | 0 | 0 |
attr |
DeepseekV2MLP.down_proj |
1 | 0 | 0 |
attr |
DeepseekV2MLP.act_fn |
1 | 0 | 0 |
meth |
DeepseekV2Model.init |
3 | 2 | 0 |
attr |
DeepseekV2Model.config |
1 | 0 | 0 |
attr |
DeepseekV2Model.device |
1 | 0 | 0 |
attr |
DeepseekV2Model.vocab_size |
1 | 0 | 0 |
attr |
DeepseekV2Model.is_v32 |
1 | 0 | 0 |
attr |
DeepseekV2Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
DeepseekV2Model.embed_tokens |
1 | 0 | 0 |
attr |
DeepseekV2Model.norm |
1 | 0 | 0 |
meth |
DeepseekV2ForCausalLM.init |
3 | 2 | 0 |
meth |
DeepseekV2ForCausalLM.set_moe_parameters |
1 | 0 | 0 |
attr |
DeepseekV2ForCausalLM.config |
1 | 0 | 0 |
attr |
DeepseekV2ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
DeepseekV2ForCausalLM.use_mha |
1 | 0 | 0 |
attr |
DeepseekV2ForCausalLM.fuse_qkv_a_proj |
1 | 0 | 0 |
attr |
DeepseekV2ForCausalLM.model |
1 | 0 | 0 |
attr |
DeepseekV2ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
DeepseekV2ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
DeepseekV2ForCausalLM.num_moe_layers |
1 | 0 | 0 |
attr |
DeepseekV2ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
DeepseekV2MoE.init |
5 | 4 | 0 |
attr |
DeepseekV2MoE.tp_size |
1 | 0 | 0 |
attr |
DeepseekV2MoE.tp_rank |
1 | 0 | 0 |
attr |
DeepseekV2MoE.routed_scaling_factor |
1 | 0 | 0 |
attr |
DeepseekV2MoE.ep_group |
1 | 0 | 0 |
attr |
DeepseekV2MoE.ep_rank |
1 | 0 | 0 |
attr |
DeepseekV2MoE.ep_size |
1 | 0 | 0 |
attr |
DeepseekV2MoE.is_sequence_parallel |
1 | 0 | 0 |
attr |
DeepseekV2MoE.gate |
1 | 0 | 0 |
attr |
DeepseekV2MoE.enable_eplb |
1 | 0 | 0 |
attr |
DeepseekV2MoE.n_redundant_experts |
1 | 0 | 0 |
attr |
DeepseekV2MoE.n_logical_experts |
1 | 0 | 0 |
attr |
DeepseekV2MoE.n_physical_experts |
1 | 0 | 0 |
attr |
DeepseekV2MoE.n_local_physical_experts |
1 | 0 | 0 |
attr |
DeepseekV2MoE.physical_expert_start |
1 | 0 | 0 |
attr |
DeepseekV2MoE.physical_expert_end |
1 | 0 | 0 |
attr |
DeepseekV2MoE.is_rocm_aiter_moe_enabled |
1 | 0 | 0 |
attr |
DeepseekV2MoE.is_fusion_moe_shared_experts_enabled |
1 | 0 | 0 |
attr |
DeepseekV2MoE.experts |
1 | 0 | 0 |
attr |
DeepseekV2MoE.shared_experts |
1 | 0 | 0 |
attr |
DeepseekV2Attention.hidden_size |
1 | 0 | 0 |
attr |
DeepseekV2Attention.qk_nope_head_dim |
1 | 0 | 0 |
attr |
DeepseekV2Attention.qk_rope_head_dim |
1 | 0 | 0 |
attr |
DeepseekV2Attention.qk_head_dim |
1 | 0 | 0 |
attr |
DeepseekV2Attention.v_head_dim |
1 | 0 | 0 |
attr |
DeepseekV2Attention.q_lora_rank |
1 | 0 | 0 |
attr |
DeepseekV2Attention.kv_lora_rank |
1 | 0 | 0 |
attr |
DeepseekV2Attention.num_heads |
1 | 0 | 0 |
attr |
DeepseekV2Attention.num_local_heads |
1 | 0 | 0 |
attr |
DeepseekV2Attention.scaling |
1 | 0 | 0 |
attr |
DeepseekV2Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
DeepseekV2Attention.kv_a_proj_with_mqa |
1 | 0 | 0 |
attr |
DeepseekV2Attention.kv_a_layernorm |
1 | 0 | 0 |
attr |
DeepseekV2Attention.kv_b_proj |
1 | 0 | 0 |
attr |
DeepseekV2Attention.o_proj |
1 | 0 | 0 |
attr |
DeepseekV2Attention.rotary_emb |
1 | 0 | 0 |
attr |
DeepseekV2Attention.attn |
1 | 0 | 0 |
attr |
DeepseekV2Attention.q_a_proj |
1 | 0 | 0 |
attr |
DeepseekV2Attention.q_a_layernorm |
1 | 0 | 0 |
attr |
DeepseekV2Attention.q_b_proj |
1 | 0 | 0 |
attr |
DeepseekV2Attention.q_proj |
1 | 0 | 0 |
meth |
Indexer.init |
9 | 8 | 0 |
meth |
Indexer.forward |
5 | 3 | 0 |
attr |
Indexer.vllm_config |
1 | 0 | 0 |
attr |
Indexer.config |
1 | 0 | 0 |
attr |
Indexer.topk_tokens |
1 | 0 | 0 |
attr |
Indexer.n_head |
1 | 0 | 0 |
attr |
Indexer.head_dim |
1 | 0 | 0 |
attr |
Indexer.rope_dim |
1 | 0 | 0 |
attr |
Indexer.q_lora_rank |
1 | 0 | 0 |
attr |
Indexer.wq_b |
1 | 0 | 0 |
attr |
Indexer.wk |
1 | 0 | 0 |
attr |
Indexer.k_norm |
1 | 0 | 0 |
attr |
Indexer.weights_proj |
1 | 0 | 0 |
attr |
Indexer.softmax_scale |
1 | 0 | 0 |
attr |
Indexer.scale_fmt |
1 | 0 | 0 |
attr |
Indexer.quant_block_size |
1 | 0 | 0 |
attr |
Indexer.topk_indices_buffer |
1 | 0 | 0 |
attr |
Indexer.k_cache |
1 | 0 | 0 |
attr |
Indexer.max_model_len |
1 | 0 | 0 |
attr |
Indexer.prefix |
1 | 0 | 0 |
attr |
Indexer.max_total_seq_len |
1 | 0 | 0 |
attr |
Indexer.indexer_op |
1 | 0 | 0 |
meth |
DeepseekV2MixtureOfExperts.extract_moe_parameters |
2 | 1 | 0 |
vllm.model_executor.models.deepseek_vl2 (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MlpProjector.init |
2 | 1 | 0 |
meth |
MlpProjector.forward |
2 | 0 | 0 |
attr |
MlpProjector.cfg |
1 | 0 | 0 |
attr |
MlpProjector.projector_type |
1 | 0 | 0 |
attr |
MlpProjector.layers |
1 | 0 | 0 |
meth |
DeepseekVLV2ForCausalLM.init |
3 | 2 | 0 |
meth |
DeepseekVLV2ForCausalLM._get_parent_and_attr |
3 | 2 | 0 |
meth |
DeepseekVLV2ForCausalLM.patch_vit_for_tp |
3 | 2 | 0 |
meth |
DeepseekVLV2ForCausalLM.forward |
6 | 5 | 0 |
attr |
DeepseekVLV2ForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
DeepseekVLV2ForCausalLM.config |
1 | 0 | 0 |
attr |
DeepseekVLV2ForCausalLM.multimodal_config |
1 | 0 | 0 |
attr |
DeepseekVLV2ForCausalLM.vision_config |
1 | 0 | 0 |
attr |
DeepseekVLV2ForCausalLM.projector_config |
1 | 0 | 0 |
attr |
DeepseekVLV2ForCausalLM.text_config |
1 | 0 | 0 |
attr |
DeepseekVLV2ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
DeepseekVLV2ForCausalLM.vision |
1 | 0 | 0 |
attr |
DeepseekVLV2ForCausalLM.projector |
1 | 0 | 0 |
attr |
DeepseekVLV2ForCausalLM.tile_tag |
1 | 0 | 0 |
attr |
DeepseekVLV2ForCausalLM.global_view_pos |
1 | 0 | 0 |
attr |
DeepseekVLV2ForCausalLM.language_model |
1 | 0 | 0 |
attr |
DeepseekVLV2ForCausalLM.image_newline |
1 | 0 | 0 |
attr |
DeepseekVLV2ForCausalLM.view_seperator |
1 | 0 | 0 |
meth |
DeepseekVL2ProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
DeepseekVL2ProcessingInfo.get_hf_processor |
2 | 1 | 0 |
vllm.model_executor.models.dots1 (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Dots1Model.init |
3 | 2 | 0 |
attr |
Dots1Model.config |
1 | 0 | 0 |
attr |
Dots1Model.vocab_size |
1 | 0 | 0 |
attr |
Dots1Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Dots1Model.embed_tokens |
1 | 0 | 0 |
attr |
Dots1Model.norm |
1 | 0 | 0 |
attr |
Dots1Attention.hidden_size |
1 | 0 | 0 |
attr |
Dots1Attention.total_num_heads |
1 | 0 | 0 |
attr |
Dots1Attention.num_heads |
1 | 0 | 0 |
attr |
Dots1Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Dots1Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Dots1Attention.head_dim |
1 | 0 | 0 |
attr |
Dots1Attention.q_size |
1 | 0 | 0 |
attr |
Dots1Attention.kv_size |
1 | 0 | 0 |
attr |
Dots1Attention.scaling |
1 | 0 | 0 |
attr |
Dots1Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
Dots1Attention.qkv_proj |
1 | 0 | 0 |
attr |
Dots1Attention.o_proj |
1 | 0 | 0 |
attr |
Dots1Attention.rotary_emb |
1 | 0 | 0 |
attr |
Dots1Attention.attn |
1 | 0 | 0 |
attr |
Dots1Attention.q_norm |
1 | 0 | 0 |
attr |
Dots1Attention.k_norm |
1 | 0 | 0 |
meth |
Dots1MoE.init |
4 | 3 | 0 |
attr |
Dots1MoE.tp_size |
1 | 0 | 0 |
attr |
Dots1MoE.routed_scaling_factor |
1 | 0 | 0 |
attr |
Dots1MoE.n_shared_experts |
1 | 0 | 0 |
attr |
Dots1MoE.gate |
1 | 0 | 0 |
attr |
Dots1MoE.experts |
1 | 0 | 0 |
attr |
Dots1MoE.shared_experts |
1 | 0 | 0 |
meth |
Dots1ForCausalLM.init |
3 | 2 | 0 |
attr |
Dots1ForCausalLM.config |
1 | 0 | 0 |
attr |
Dots1ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Dots1ForCausalLM.model |
1 | 0 | 0 |
attr |
Dots1ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Dots1ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Dots1ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Dots1DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Dots1DecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
Dots1DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Dots1DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Dots1DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Dots1DecoderLayer.routed_scaling_factor |
1 | 0 | 0 |
attr |
Dots1DecoderLayer.mlp |
1 | 0 | 0 |
meth |
Dots1MLP.forward |
2 | 0 | 0 |
attr |
Dots1MLP.gate_up_proj |
1 | 0 | 0 |
attr |
Dots1MLP.down_proj |
1 | 0 | 0 |
attr |
Dots1MLP.act_fn |
1 | 0 | 0 |
vllm.model_executor.models.dots_ocr (60 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
DotsVisionTransformer.config |
1 | 0 | 0 |
attr |
DotsVisionTransformer.spatial_merge_size |
1 | 0 | 0 |
attr |
DotsVisionTransformer.patch_embed |
1 | 0 | 0 |
attr |
DotsVisionTransformer.rotary_pos_emb |
1 | 0 | 0 |
attr |
DotsVisionTransformer.attn_backend |
1 | 0 | 0 |
attr |
DotsVisionTransformer.out_hidden_size |
1 | 0 | 0 |
attr |
DotsVisionTransformer.blocks |
1 | 0 | 0 |
attr |
DotsVisionTransformer.merger |
1 | 0 | 0 |
attr |
DotsVisionTransformer.post_trunk_norm |
1 | 0 | 0 |
meth |
DotsVisionBlock.init |
4 | 2 | 0 |
attr |
DotsVisionBlock.attn |
1 | 0 | 0 |
attr |
DotsVisionBlock.norm1 |
1 | 0 | 0 |
attr |
DotsVisionBlock.mlp |
1 | 0 | 0 |
attr |
DotsVisionBlock.norm2 |
1 | 0 | 0 |
meth |
PatchMerger.init |
6 | 5 | 0 |
attr |
PatchMerger.hidden_size |
1 | 0 | 0 |
attr |
PatchMerger.pre_norm |
1 | 0 | 0 |
attr |
PatchMerger.mlp |
1 | 0 | 0 |
attr |
PatchMerger.ln_q |
1 | 0 | 0 |
meth |
DotsOCRForCausalLM.init |
3 | 2 | 0 |
meth |
DotsOCRForCausalLM.forward |
6 | 5 | 0 |
attr |
DotsOCRForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
DotsOCRForCausalLM.quant_config |
1 | 0 | 0 |
attr |
DotsOCRForCausalLM.use_data_parallel |
1 | 0 | 0 |
attr |
DotsOCRForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
DotsOCRForCausalLM.vision_tower |
1 | 0 | 0 |
meth |
DotsPatchEmbed.init |
2 | 0 | 0 |
meth |
DotsPatchEmbed.forward |
3 | 2 | 0 |
attr |
DotsPatchEmbed.num_channels |
1 | 0 | 0 |
attr |
DotsPatchEmbed.patch_size |
1 | 0 | 0 |
attr |
DotsPatchEmbed.temporal_patch_size |
1 | 0 | 0 |
attr |
DotsPatchEmbed.embed_dim |
1 | 0 | 0 |
attr |
DotsPatchEmbed.config |
1 | 0 | 0 |
attr |
DotsPatchEmbed.proj |
1 | 0 | 0 |
attr |
DotsPatchEmbed.norm |
1 | 0 | 0 |
meth |
DotsSwiGLUFFN.init |
4 | 2 | 0 |
attr |
DotsSwiGLUFFN.fc13 |
1 | 0 | 0 |
attr |
DotsSwiGLUFFN.fc2 |
1 | 0 | 0 |
attr |
DotsSwiGLUFFN.act_fn |
1 | 0 | 0 |
meth |
DotsViTPreprocessor.init |
2 | 0 | 0 |
meth |
DotsViTPreprocessor.forward |
3 | 2 | 0 |
attr |
DotsViTPreprocessor.patch_h |
1 | 0 | 0 |
attr |
DotsViTPreprocessor.patch_w |
1 | 0 | 0 |
attr |
DotsViTPreprocessor.embed_dim |
1 | 0 | 0 |
attr |
DotsViTPreprocessor.config |
1 | 0 | 0 |
attr |
DotsViTPreprocessor.patchifier |
1 | 0 | 0 |
meth |
DotsVisionAttention.init |
7 | 6 | 0 |
attr |
DotsVisionAttention.embed_dim |
1 | 0 | 0 |
attr |
DotsVisionAttention.tp_size |
1 | 0 | 0 |
attr |
DotsVisionAttention.tp_rank |
1 | 0 | 0 |
attr |
DotsVisionAttention.hidden_size_per_attention_head |
1 | 0 | 0 |
attr |
DotsVisionAttention.num_attention_heads_per_partition |
1 | 0 | 0 |
attr |
DotsVisionAttention.qkv |
1 | 0 | 0 |
attr |
DotsVisionAttention.proj |
1 | 0 | 0 |
attr |
DotsVisionAttention.attn |
1 | 0 | 0 |
attr |
DotsVisionAttention.apply_rotary_emb |
1 | 0 | 0 |
vllm.model_executor.models.eagle2_5_vl (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Eagle2_5_VLProcessingInfo.get_hf_processor |
2 | 1 | 0 |
attr |
Eagle2_5_VLProcessor.config |
1 | 0 | 0 |
attr |
Eagle2_5_VLProcessor.tokenizer |
1 | 0 | 0 |
attr |
Eagle2_5_VLProcessor.num_image_token |
1 | 0 | 0 |
attr |
Eagle2_5_VLProcessor.image_size |
1 | 0 | 0 |
attr |
Eagle2_5_VLProcessor.min_dynamic_patch |
1 | 0 | 0 |
attr |
Eagle2_5_VLProcessor.max_dynamic_patch |
1 | 0 | 0 |
attr |
Eagle2_5_VLProcessor.dynamic_image_size |
1 | 0 | 0 |
meth |
Eagle2_5_VLForConditionalGeneration._init_vision_model |
4 | 3 | 0 |
attr |
Eagle2_5_VLForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Eagle2_5_VLForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Eagle2_5_VLForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
Eagle2_5_VLForConditionalGeneration.patch_size |
1 | 0 | 0 |
attr |
Eagle2_5_VLForConditionalGeneration.downsample_ratio |
1 | 0 | 0 |
attr |
Eagle2_5_VLForConditionalGeneration.num_image_token |
1 | 0 | 0 |
attr |
Eagle2_5_VLForConditionalGeneration.select_layer |
1 | 0 | 0 |
attr |
Eagle2_5_VLForConditionalGeneration.img_context_token_id |
1 | 0 | 0 |
attr |
Eagle2_5_VLForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Eagle2_5_VLForConditionalGeneration.vision_model |
1 | 0 | 0 |
attr |
Eagle2_5_VLForConditionalGeneration.mlp1 |
1 | 0 | 0 |
attr |
Eagle2_5_VLForConditionalGeneration.language_model |
1 | 0 | 0 |
vllm.model_executor.models.ernie45 (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5ForCausalLM.init |
3 | 2 | 0 |
vllm.model_executor.models.ernie45_moe (68 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5_MoeModel.init |
3 | 2 | 0 |
attr |
Ernie4_5_MoeModel.vocab_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.config |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.num_redundant_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.embed_tokens |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.norm |
1 | 0 | 0 |
attr |
Ernie4_5_MoeDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Ernie4_5_MoeDecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
Ernie4_5_MoeDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Ernie4_5_MoeDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Ernie4_5_MoeDecoderLayer.mlp |
1 | 0 | 0 |
meth |
Ernie4_5_MoeForCausalLM.init |
3 | 2 | 0 |
attr |
Ernie4_5_MoeForCausalLM.config |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.model |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.expert_weights |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.num_moe_layers |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.num_expert_groups |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.num_logical_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.num_physical_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.num_local_physical_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.num_routed_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.num_shared_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.num_redundant_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.layer_idx |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.hidden_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.total_num_heads |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.num_heads |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.num_kv_heads |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.head_dim |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.q_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.kv_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.scaling |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.qkv_proj |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.o_proj |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.rotary_emb |
1 | 0 | 0 |
attr |
Ernie4_5_MoeAttention.attn |
1 | 0 | 0 |
meth |
Ernie4_5_MoeMoE.init |
5 | 4 | 0 |
attr |
Ernie4_5_MoeMoE.layer_idx |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.tp_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.moe_num_shared_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.ep_group |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.ep_rank |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.ep_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.enable_eplb |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.n_redundant_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.n_logical_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.n_physical_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.n_local_physical_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.physical_expert_start |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.physical_expert_end |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.has_shared_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.gate |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMoE.shared_experts |
1 | 0 | 0 |
meth |
Ernie4_5_MoeMLP.forward |
2 | 0 | 0 |
attr |
Ernie4_5_MoeMLP.gate_up_proj |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMLP.down_proj |
1 | 0 | 0 |
attr |
Ernie4_5_MoeMLP.act_fn |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.ernie45_vl (81 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
smart_resize |
6 | 5 | 0 |
meth |
Ernie4_5_VLDummyInputsBuilder._get_dummy_videos |
6 | 5 | 0 |
meth |
Ernie4_5_VisionTransformer.init |
5 | 4 | 0 |
meth |
Ernie4_5_VisionTransformer.forward |
4 | 3 | 0 |
meth |
Ernie4_5_VisionTransformer.load_weights |
2 | 1 | 0 |
attr |
Ernie4_5_VisionTransformer.spatial_merge_size |
1 | 0 | 0 |
attr |
Ernie4_5_VisionTransformer.num_heads |
1 | 0 | 0 |
attr |
Ernie4_5_VisionTransformer.embed_dim |
1 | 0 | 0 |
attr |
Ernie4_5_VisionTransformer.patch_embed |
1 | 0 | 0 |
attr |
Ernie4_5_VisionTransformer.rotary_pos_emb |
1 | 0 | 0 |
attr |
Ernie4_5_VisionTransformer.blocks |
1 | 0 | 0 |
attr |
Ernie4_5_VisionTransformer.ln |
1 | 0 | 0 |
attr |
Ernie4_5_VisionTransformer.attn_backend |
1 | 0 | 0 |
func |
all_gather_interleave |
4 | 2 | 0 |
meth |
Ernie4_5_VLMoeForConditionalGeneration.forward |
6 | 4 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.visual_token_mask |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.vision_model |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.resampler_model |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
Ernie4_5_VisionRotaryEmbedding.inv_freq |
1 | 0 | 0 |
attr |
Ernie4_5_VisionBlock.norm1 |
1 | 0 | 0 |
attr |
Ernie4_5_VisionBlock.norm2 |
1 | 0 | 0 |
attr |
Ernie4_5_VisionBlock.attn |
1 | 0 | 0 |
attr |
Ernie4_5_VisionBlock.mlp |
1 | 0 | 0 |
meth |
Ernie4_5_VLProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Ernie4_5_VLProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
Ernie4_5_VLProcessingInfo.get_image_processor |
2 | 1 | 0 |
meth |
Ernie4_5_VLProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
VariableResolutionResamplerModel.init |
7 | 2 | 0 |
meth |
VariableResolutionResamplerModel.spatial_conv_reshape |
3 | 0 | 0 |
meth |
VariableResolutionResamplerModel.forward |
3 | 0 | 0 |
attr |
VariableResolutionResamplerModel.in_dim |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.out_dim |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.config |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.spatial_conv_size |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.temporal_conv_size |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.use_temporal_conv |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.spatial_dim |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.temporal_dim |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.spatial_linear1 |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.spatial_gelu |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.spatial_linear2 |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.spatial_norm |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.mlp |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.after_norm |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.temporal_linear1 |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.temporal_gelu |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.temporal_linear2 |
1 | 0 | 0 |
attr |
VariableResolutionResamplerModel.temporal_norm |
1 | 0 | 0 |
meth |
Ernie4_5_VisionMLP.init |
6 | 5 | 0 |
attr |
Ernie4_5_VisionMLP.fc1 |
1 | 0 | 0 |
attr |
Ernie4_5_VisionMLP.act |
1 | 0 | 0 |
attr |
Ernie4_5_VisionMLP.fc2 |
1 | 0 | 0 |
attr |
Ernie4_5_VisionAttention.tp_size |
1 | 0 | 0 |
attr |
Ernie4_5_VisionAttention.tp_rank |
1 | 0 | 0 |
attr |
Ernie4_5_VisionAttention.hidden_size_per_attention_head |
1 | 0 | 0 |
attr |
Ernie4_5_VisionAttention.num_attention_heads_per_partition |
1 | 0 | 0 |
attr |
Ernie4_5_VisionAttention.qkv |
1 | 0 | 0 |
attr |
Ernie4_5_VisionAttention.proj |
1 | 0 | 0 |
attr |
Ernie4_5_VisionAttention.attn |
1 | 0 | 0 |
attr |
Ernie4_5_VisionAttention.apply_rotary_emb |
1 | 0 | 0 |
meth |
Ernie4_5_VisionPatchEmbed.init |
5 | 4 | 0 |
attr |
Ernie4_5_VisionPatchEmbed.patch_size |
1 | 0 | 0 |
attr |
Ernie4_5_VisionPatchEmbed.in_channels |
1 | 0 | 0 |
attr |
Ernie4_5_VisionPatchEmbed.embed_dim |
1 | 0 | 0 |
attr |
Ernie4_5_VisionPatchEmbed.proj |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.ernie45_vl_moe (52 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5_VLMoeMoE.init |
4 | 3 | 0 |
attr |
Ernie4_5_VLMoeMoE.layer_idx |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeMoE.tp_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeMoE.has_shared_experts |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeMoE.hidden_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeMoE.e_score_correction_bias |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeMoE.shared_experts |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeMoE.text_experts_gate |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeMoE.text_experts |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeMoE.vision_experts_gate |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeMoE.vision_experts |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeForCausalLM.init |
3 | 2 | 0 |
attr |
Ernie4_5_VLMoeForCausalLM.config |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForCausalLM.model |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeModel.init |
3 | 2 | 0 |
attr |
Ernie4_5_VLMoeModel.vocab_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeModel.config |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeModel.im_patch_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeModel.embed_tokens |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeModel.norm |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeDecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeDecoderLayer.mlp |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeMLP.init |
3 | 1 | 0 |
meth |
Ernie4_5_VLMoeMLP.forward |
2 | 0 | 0 |
attr |
Ernie4_5_VLMoeMLP.shared_experts |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.layer_idx |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.hidden_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.total_num_heads |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.num_heads |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.num_kv_heads |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.head_dim |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.q_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.kv_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.scaling |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.qkv_proj |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.o_proj |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.rotary_emb |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeAttention.attn |
1 | 0 | 0 |
vllm.model_executor.models.ernie_mtp (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
ErnieMultiTokenPredictorLayer.mtp_emb_norm |
1 | 0 | 0 |
attr |
ErnieMultiTokenPredictorLayer.mtp_hidden_norm |
1 | 0 | 0 |
attr |
ErnieMultiTokenPredictorLayer.mtp_linear_proj |
1 | 0 | 0 |
attr |
ErnieMultiTokenPredictorLayer.mtp_block |
1 | 0 | 0 |
meth |
ErnieMultiTokenPredictor.init |
3 | 2 | 0 |
attr |
ErnieMultiTokenPredictor.mtp_start_layer_idx |
1 | 0 | 0 |
attr |
ErnieMultiTokenPredictor.num_mtp_layers |
1 | 0 | 0 |
attr |
ErnieMultiTokenPredictor.layers |
1 | 0 | 0 |
attr |
ErnieMultiTokenPredictor.embed_tokens |
1 | 0 | 0 |
attr |
ErnieMultiTokenPredictor.logits_processor |
1 | 0 | 0 |
meth |
ErnieMTP.init |
3 | 2 | 0 |
attr |
ErnieMTP.config |
1 | 0 | 0 |
attr |
ErnieMTP.model |
1 | 0 | 0 |
attr |
ErnieMTP.lm_head |
1 | 0 | 0 |
vllm.model_executor.models.exaone (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ExaoneModel.init |
3 | 2 | 0 |
attr |
ExaoneModel.config |
1 | 0 | 0 |
attr |
ExaoneModel.quant_config |
1 | 0 | 0 |
attr |
ExaoneModel.vocab_size |
1 | 0 | 0 |
attr |
ExaoneModel.wte |
1 | 0 | 0 |
attr |
ExaoneModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
ExaoneModel.ln_f |
1 | 0 | 0 |
attr |
ExaoneBlockAttention.attention |
1 | 0 | 0 |
meth |
ExaoneGatedMLP.forward |
2 | 0 | 0 |
attr |
ExaoneGatedMLP.gate_up_proj |
1 | 0 | 0 |
attr |
ExaoneGatedMLP.c_proj |
1 | 0 | 0 |
attr |
ExaoneGatedMLP.act_fn |
1 | 0 | 0 |
meth |
ExaoneForCausalLM.init |
3 | 2 | 0 |
attr |
ExaoneForCausalLM.config |
1 | 0 | 0 |
attr |
ExaoneForCausalLM.quant_config |
1 | 0 | 0 |
attr |
ExaoneForCausalLM.transformer |
1 | 0 | 0 |
attr |
ExaoneForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
ExaoneForCausalLM.lm_head |
1 | 0 | 0 |
attr |
ExaoneForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
ExaoneAttention.hidden_size |
1 | 0 | 0 |
attr |
ExaoneAttention.total_num_heads |
1 | 0 | 0 |
attr |
ExaoneAttention.num_heads |
1 | 0 | 0 |
attr |
ExaoneAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
ExaoneAttention.num_kv_heads |
1 | 0 | 0 |
attr |
ExaoneAttention.head_dim |
1 | 0 | 0 |
attr |
ExaoneAttention.q_size |
1 | 0 | 0 |
attr |
ExaoneAttention.kv_size |
1 | 0 | 0 |
attr |
ExaoneAttention.scaling |
1 | 0 | 0 |
attr |
ExaoneAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
ExaoneAttention.qkv_proj |
1 | 0 | 0 |
attr |
ExaoneAttention.out_proj |
1 | 0 | 0 |
attr |
ExaoneAttention.rotary_emb |
1 | 0 | 0 |
attr |
ExaoneAttention.attn |
1 | 0 | 0 |
attr |
ExaoneDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
ExaoneDecoderLayer.attn |
1 | 0 | 0 |
attr |
ExaoneDecoderLayer.mlp |
1 | 0 | 0 |
attr |
ExaoneDecoderLayer.ln_1 |
1 | 0 | 0 |
attr |
ExaoneDecoderLayer.ln_2 |
1 | 0 | 0 |
vllm.model_executor.models.exaone4 (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Exaone4Attention.hidden_size |
1 | 0 | 0 |
attr |
Exaone4Attention.total_num_heads |
1 | 0 | 0 |
attr |
Exaone4Attention.num_heads |
1 | 0 | 0 |
attr |
Exaone4Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Exaone4Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Exaone4Attention.head_dim |
1 | 0 | 0 |
attr |
Exaone4Attention.q_size |
1 | 0 | 0 |
attr |
Exaone4Attention.kv_size |
1 | 0 | 0 |
attr |
Exaone4Attention.scaling |
1 | 0 | 0 |
attr |
Exaone4Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
Exaone4Attention.qkv_proj |
1 | 0 | 0 |
attr |
Exaone4Attention.o_proj |
1 | 0 | 0 |
attr |
Exaone4Attention.q_norm |
1 | 0 | 0 |
attr |
Exaone4Attention.k_norm |
1 | 0 | 0 |
attr |
Exaone4Attention.sliding_window |
1 | 0 | 0 |
attr |
Exaone4Attention.apply_rope_all_layers |
1 | 0 | 0 |
attr |
Exaone4Attention.rotary_emb |
1 | 0 | 0 |
attr |
Exaone4Attention.attn |
1 | 0 | 0 |
meth |
Exaone4ForCausalLM.init |
3 | 2 | 0 |
attr |
Exaone4ForCausalLM.config |
1 | 0 | 0 |
attr |
Exaone4ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Exaone4ForCausalLM.model |
1 | 0 | 0 |
attr |
Exaone4ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Exaone4ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Exaone4ForCausalLM.logits_processor |
1 | 0 | 0 |
meth |
Exaone4GatedMLP.forward |
2 | 0 | 0 |
attr |
Exaone4GatedMLP.gate_up_proj |
1 | 0 | 0 |
attr |
Exaone4GatedMLP.down_proj |
1 | 0 | 0 |
attr |
Exaone4GatedMLP.act_fn |
1 | 0 | 0 |
meth |
Exaone4Model.init |
3 | 2 | 0 |
attr |
Exaone4Model.config |
1 | 0 | 0 |
attr |
Exaone4Model.quant_config |
1 | 0 | 0 |
attr |
Exaone4Model.vocab_size |
1 | 0 | 0 |
attr |
Exaone4Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Exaone4Model.embed_tokens |
1 | 0 | 0 |
attr |
Exaone4Model.norm |
1 | 0 | 0 |
attr |
Exaone4DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Exaone4DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Exaone4DecoderLayer.mlp |
1 | 0 | 0 |
attr |
Exaone4DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Exaone4DecoderLayer.post_feedforward_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.exaone_moe (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ExaoneMoe.init |
5 | 4 | 0 |
attr |
ExaoneMoe.tp_size |
1 | 0 | 0 |
attr |
ExaoneMoe.routed_scaling_factor |
1 | 0 | 0 |
attr |
ExaoneMoe.ep_group |
1 | 0 | 0 |
attr |
ExaoneMoe.ep_rank |
1 | 0 | 0 |
attr |
ExaoneMoe.ep_size |
1 | 0 | 0 |
attr |
ExaoneMoe.n_routed_experts |
1 | 0 | 0 |
attr |
ExaoneMoe.gate |
1 | 0 | 0 |
attr |
ExaoneMoe.e_score_correction_bias |
1 | 0 | 0 |
attr |
ExaoneMoe.enable_eplb |
1 | 0 | 0 |
attr |
ExaoneMoe.n_logical_experts |
1 | 0 | 0 |
attr |
ExaoneMoe.n_redundant_experts |
1 | 0 | 0 |
attr |
ExaoneMoe.n_physical_experts |
1 | 0 | 0 |
attr |
ExaoneMoe.n_local_physical_experts |
1 | 0 | 0 |
attr |
ExaoneMoe.physical_expert_start |
1 | 0 | 0 |
attr |
ExaoneMoe.physical_expert_end |
1 | 0 | 0 |
attr |
ExaoneMoe.experts |
1 | 0 | 0 |
attr |
ExaoneMoe.shared_experts |
1 | 0 | 0 |
meth |
ExaoneMoeModel.init |
3 | 2 | 0 |
attr |
ExaoneMoeModel.num_redundant_experts |
1 | 0 | 0 |
attr |
ExaoneMoeModel.config |
1 | 0 | 0 |
attr |
ExaoneMoeModel.quant_config |
1 | 0 | 0 |
attr |
ExaoneMoeModel.vocab_size |
1 | 0 | 0 |
attr |
ExaoneMoeModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
ExaoneMoeModel.embed_tokens |
1 | 0 | 0 |
attr |
ExaoneMoeModel.norm |
1 | 0 | 0 |
meth |
ExaoneMoeForCausalLM.init |
3 | 2 | 0 |
attr |
ExaoneMoeForCausalLM.config |
1 | 0 | 0 |
attr |
ExaoneMoeForCausalLM.lora_config |
1 | 0 | 0 |
attr |
ExaoneMoeForCausalLM.quant_config |
1 | 0 | 0 |
attr |
ExaoneMoeForCausalLM.model |
1 | 0 | 0 |
attr |
ExaoneMoeForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
ExaoneMoeForCausalLM.unpadded_vocab_size |
1 | 0 | 0 |
attr |
ExaoneMoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
ExaoneMoeForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
ExaoneMoeDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
ExaoneMoeDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
ExaoneMoeDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
ExaoneMoeDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
ExaoneMoeDecoderLayer.mlp |
1 | 0 | 0 |
vllm.model_executor.models.exaone_moe_mtp (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
ExaoneMoeMultiTokenPredictor.init |
3 | 2 | 0 |
attr |
ExaoneMoeMultiTokenPredictor.config |
1 | 0 | 0 |
attr |
ExaoneMoeMultiTokenPredictor.vocab_size |
1 | 0 | 0 |
attr |
ExaoneMoeMultiTokenPredictor.org_vocab_size |
1 | 0 | 0 |
attr |
ExaoneMoeMultiTokenPredictor.mtp_start_layer_idx |
1 | 0 | 0 |
attr |
ExaoneMoeMultiTokenPredictor.num_mtp_layers |
1 | 0 | 0 |
attr |
ExaoneMoeMultiTokenPredictor.embed_tokens |
1 | 0 | 0 |
attr |
ExaoneMoeMultiTokenPredictor.fc |
1 | 0 | 0 |
attr |
ExaoneMoeMultiTokenPredictor.layers |
1 | 0 | 0 |
attr |
ExaoneMoeMultiTokenPredictor.norm |
1 | 0 | 0 |
attr |
ExaoneMoeMultiTokenPredictor.pre_fc_norm_hidden |
1 | 0 | 0 |
attr |
ExaoneMoeMultiTokenPredictor.pre_fc_norm_embedding |
1 | 0 | 0 |
meth |
ExaoneMoeMTP.init |
3 | 2 | 0 |
meth |
ExaoneMoeMTP.forward |
8 | 7 | 0 |
attr |
ExaoneMoeMTP.vllm_config |
1 | 0 | 0 |
attr |
ExaoneMoeMTP.quant_config |
1 | 0 | 0 |
attr |
ExaoneMoeMTP.config |
1 | 0 | 0 |
attr |
ExaoneMoeMTP.model |
1 | 0 | 0 |
attr |
ExaoneMoeMTP.unpadded_vocab_size |
1 | 0 | 0 |
attr |
ExaoneMoeMTP.lm_head |
1 | 0 | 0 |
attr |
ExaoneMoeMTP.logits_processor |
1 | 0 | 0 |
vllm.model_executor.models.extract_hidden_states (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CacheOnlyAttentionLayer.init |
6 | 5 | 0 |
attr |
CacheOnlyAttentionLayer.num_heads |
1 | 0 | 0 |
attr |
CacheOnlyAttentionLayer.head_size |
1 | 0 | 0 |
attr |
CacheOnlyAttentionLayer.layer_name |
1 | 0 | 0 |
attr |
CacheOnlyAttentionLayer.kv_cache_torch_dtype |
1 | 0 | 0 |
attr |
CacheOnlyAttentionLayer.attn_backend |
1 | 0 | 0 |
attr |
CacheOnlyAttentionLayer.impl |
1 | 0 | 0 |
attr |
CacheOnlyAttentionLayer.kv_cache |
1 | 0 | 0 |
attr |
CacheOnlyAttentionLayer.block_size |
1 | 0 | 0 |
meth |
CacheOnlyAttentionBackend.use_cascade_attention |
3 | 1 | 0 |
meth |
ExtractHiddenStatesModel.init |
3 | 2 | 0 |
attr |
ExtractHiddenStatesModel.vllm_config |
1 | 0 | 0 |
attr |
ExtractHiddenStatesModel.hf_config |
1 | 0 | 0 |
attr |
ExtractHiddenStatesModel.hidden_size |
1 | 0 | 0 |
attr |
ExtractHiddenStatesModel.target_num_hidden_layers |
1 | 0 | 0 |
attr |
ExtractHiddenStatesModel.num_hidden_states |
1 | 0 | 0 |
attr |
ExtractHiddenStatesModel.cache_only_layers |
1 | 0 | 0 |
meth |
CacheOnlyAttentionImpl.do_kv_cache_update |
5 | 0 | 0 |
meth |
CacheOnlyAttentionImpl.forward |
3 | 0 | 0 |
attr |
CacheOnlyAttentionImpl.num_heads |
1 | 0 | 0 |
attr |
CacheOnlyAttentionImpl.head_size |
1 | 0 | 0 |
attr |
CacheOnlyAttentionImpl.kv_cache_dtype |
1 | 0 | 0 |
attr |
CacheOnlyAttentionImpl.kv_cache_torch_dtype |
1 | 0 | 0 |
attr |
CacheOnlyAttentionImpl.num_queries_per_kv |
1 | 0 | 0 |
meth |
CacheOnlyAttentionMetadata.init |
2 | 1 | 0 |
attr |
CacheOnlyAttentionMetadata.slot_mapping |
1 | 0 | 0 |
meth |
CacheOnlyAttentionMetadataBuilder.init |
5 | 4 | 0 |
func |
basic_cache |
4 | 3 | 0 |
func |
dummy_attention |
3 | 0 | 0 |
vllm.model_executor.models.fairseq2_llama (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Fairseq2LlamaForCausalLM.init |
3 | 2 | 0 |
meth |
Fairseq2LlamaForCausalLM.flag_sharded_weights |
2 | 1 | 0 |
attr |
Fairseq2LlamaForCausalLM.tp_rank |
1 | 0 | 0 |
attr |
Fairseq2LlamaForCausalLM.tp_size |
1 | 0 | 0 |
attr |
Fairseq2LlamaForCausalLM.allow_patterns_overrides |
1 | 0 | 0 |
vllm.model_executor.models.falcon (50 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FalconModel.init |
3 | 2 | 0 |
attr |
FalconModel.config |
1 | 0 | 0 |
attr |
FalconModel.embed_dim |
1 | 0 | 0 |
attr |
FalconModel.num_heads |
1 | 0 | 0 |
attr |
FalconModel.use_alibi |
1 | 0 | 0 |
attr |
FalconModel.word_embeddings |
1 | 0 | 0 |
attr |
FalconModel.ln_f |
1 | 0 | 0 |
attr |
FalconModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
FalconMLP.init |
4 | 3 | 0 |
attr |
FalconMLP.dense_h_to_4h |
1 | 0 | 0 |
attr |
FalconMLP.act |
1 | 0 | 0 |
attr |
FalconMLP.reduce_row_parallel_results |
1 | 0 | 0 |
attr |
FalconMLP.dense_4h_to_h |
1 | 0 | 0 |
meth |
FalconAttention.init |
5 | 4 | 0 |
attr |
FalconAttention.hidden_size |
1 | 0 | 0 |
attr |
FalconAttention.total_num_heads |
1 | 0 | 0 |
attr |
FalconAttention.num_heads |
1 | 0 | 0 |
attr |
FalconAttention.head_dim |
1 | 0 | 0 |
attr |
FalconAttention.new_decoder_architecture |
1 | 0 | 0 |
attr |
FalconAttention.multi_query |
1 | 0 | 0 |
attr |
FalconAttention.num_kv_heads |
1 | 0 | 0 |
attr |
FalconAttention.query_key_value |
1 | 0 | 0 |
attr |
FalconAttention.q_size |
1 | 0 | 0 |
attr |
FalconAttention.kv_size |
1 | 0 | 0 |
attr |
FalconAttention.inv_norm_factor |
1 | 0 | 0 |
attr |
FalconAttention.reduce_row_parallel_results |
1 | 0 | 0 |
attr |
FalconAttention.dense |
1 | 0 | 0 |
attr |
FalconAttention.use_rotary |
1 | 0 | 0 |
attr |
FalconAttention.use_alibi |
1 | 0 | 0 |
attr |
FalconAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
FalconAttention.rotary_emb |
1 | 0 | 0 |
attr |
FalconAttention.attn |
1 | 0 | 0 |
meth |
FalconForCausalLM.init |
3 | 2 | 0 |
attr |
FalconForCausalLM.config |
1 | 0 | 0 |
attr |
FalconForCausalLM.quant_config |
1 | 0 | 0 |
attr |
FalconForCausalLM.transformer |
1 | 0 | 0 |
attr |
FalconForCausalLM.tie_word_embeddings |
1 | 0 | 0 |
attr |
FalconForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
FalconForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
FalconForCausalLM.lm_head |
1 | 0 | 0 |
meth |
FalconDecoderLayer.init |
5 | 4 | 0 |
attr |
FalconDecoderLayer.num_heads |
1 | 0 | 0 |
attr |
FalconDecoderLayer.self_attention |
1 | 0 | 0 |
attr |
FalconDecoderLayer.mlp |
1 | 0 | 0 |
attr |
FalconDecoderLayer.config |
1 | 0 | 0 |
attr |
FalconDecoderLayer.reduce_row_parallel_results |
1 | 0 | 0 |
attr |
FalconDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
FalconDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
FalconDecoderLayer.ln_attn |
1 | 0 | 0 |
attr |
FalconDecoderLayer.ln_mlp |
1 | 0 | 0 |
vllm.model_executor.models.falcon_h1 (66 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FalconH1MLP.forward |
2 | 0 | 0 |
attr |
FalconH1MLP.gate_up_proj |
1 | 0 | 0 |
attr |
FalconH1MLP.down_proj |
1 | 0 | 0 |
attr |
FalconH1MLP.tp_size |
1 | 0 | 0 |
attr |
FalconH1MLP.intermediate_size |
1 | 0 | 0 |
attr |
FalconH1MLP.act_fn |
1 | 0 | 0 |
meth |
FalconH1ForCausalLM.init |
3 | 2 | 0 |
meth |
FalconH1ForCausalLM.forward |
6 | 4 | 0 |
attr |
FalconH1ForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
FalconH1ForCausalLM.model_config |
1 | 0 | 0 |
attr |
FalconH1ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
FalconH1ForCausalLM.config |
1 | 0 | 0 |
attr |
FalconH1ForCausalLM.scheduler_config |
1 | 0 | 0 |
attr |
FalconH1ForCausalLM.model |
1 | 0 | 0 |
attr |
FalconH1ForCausalLM.tie_word_embeddings |
1 | 0 | 0 |
attr |
FalconH1ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
FalconH1ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
FalconH1ForCausalLM.lm_head_multiplier |
1 | 0 | 0 |
attr |
FalconH1ForCausalLM.logits_processor |
1 | 0 | 0 |
meth |
FalconH1AttentionDecoderLayer.self_attention |
4 | 3 | 0 |
meth |
FalconH1AttentionDecoderLayer.forward |
5 | 3 | 0 |
attr |
FalconH1AttentionDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
FalconH1AttentionDecoderLayer.total_num_heads |
1 | 0 | 0 |
attr |
FalconH1AttentionDecoderLayer.num_heads |
1 | 0 | 0 |
attr |
FalconH1AttentionDecoderLayer.total_num_kv_heads |
1 | 0 | 0 |
attr |
FalconH1AttentionDecoderLayer.num_kv_heads |
1 | 0 | 0 |
attr |
FalconH1AttentionDecoderLayer.head_dim |
1 | 0 | 0 |
attr |
FalconH1AttentionDecoderLayer.q_size |
1 | 0 | 0 |
attr |
FalconH1AttentionDecoderLayer.kv_size |
1 | 0 | 0 |
attr |
FalconH1AttentionDecoderLayer.scaling |
1 | 0 | 0 |
attr |
FalconH1AttentionDecoderLayer.max_position_embeddings |
1 | 0 | 0 |
attr |
FalconH1AttentionDecoderLayer.rotary_emb |
1 | 0 | 0 |
attr |
FalconH1AttentionDecoderLayer.qkv_proj |
1 | 0 | 0 |
attr |
FalconH1AttentionDecoderLayer.o_proj |
1 | 0 | 0 |
attr |
FalconH1AttentionDecoderLayer.attn |
1 | 0 | 0 |
attr |
FalconH1AttentionDecoderLayer.key_multiplier |
1 | 0 | 0 |
meth |
FalconH1ParallelHybrid.forward |
4 | 2 | 0 |
attr |
FalconH1ParallelHybrid.self_attn |
1 | 0 | 0 |
attr |
FalconH1ParallelHybrid.mamba |
1 | 0 | 0 |
attr |
FalconH1ParallelHybrid.ssm_out_multiplier |
1 | 0 | 0 |
attr |
FalconH1ParallelHybrid.ssm_in_multiplier |
1 | 0 | 0 |
attr |
FalconH1ParallelHybrid.attention_in_multiplier |
1 | 0 | 0 |
attr |
FalconH1ParallelHybrid.attn_out_multiplier |
1 | 0 | 0 |
attr |
FalconH1ParallelHybrid.feed_forward |
1 | 0 | 0 |
attr |
FalconH1ParallelHybrid.input_layernorm |
1 | 0 | 0 |
attr |
FalconH1ParallelHybrid.pre_ff_layernorm |
1 | 0 | 0 |
meth |
FalconH1SSMDecoderLayer._init_mup_vector |
1 | 0 | 0 |
meth |
FalconH1SSMDecoderLayer.forward |
4 | 2 | 0 |
attr |
FalconH1SSMDecoderLayer.config |
1 | 0 | 0 |
attr |
FalconH1SSMDecoderLayer.tp_size |
1 | 0 | 0 |
attr |
FalconH1SSMDecoderLayer.d_ssm |
1 | 0 | 0 |
attr |
FalconH1SSMDecoderLayer.mamba |
1 | 0 | 0 |
attr |
FalconH1SSMDecoderLayer.groups_time_state_size |
1 | 0 | 0 |
attr |
FalconH1SSMDecoderLayer.zxbcdt_multipliers |
1 | 0 | 0 |
meth |
FalconH1Model.init |
3 | 2 | 0 |
attr |
FalconH1Model.config |
1 | 0 | 0 |
attr |
FalconH1Model.vocab_size |
1 | 0 | 0 |
attr |
FalconH1Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
FalconH1Model.embed_tokens |
1 | 0 | 0 |
attr |
FalconH1Model.embedding_multiplier |
1 | 0 | 0 |
attr |
FalconH1Model.final_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.fireredasr2 (75 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConformerEncoder.init |
7 | 6 | 0 |
meth |
ConformerEncoder.forward |
4 | 3 | 0 |
attr |
ConformerEncoder.odim |
1 | 0 | 0 |
attr |
ConformerEncoder.input_preprocessor |
1 | 0 | 0 |
attr |
ConformerEncoder.positional_encoding |
1 | 0 | 0 |
attr |
ConformerEncoder.layer_stack |
1 | 0 | 0 |
meth |
EncoderMultiHeadAttention.init |
3 | 2 | 0 |
attr |
EncoderMultiHeadAttention.n_head |
1 | 0 | 0 |
attr |
EncoderMultiHeadAttention.d_k |
1 | 0 | 0 |
attr |
EncoderMultiHeadAttention.d_v |
1 | 0 | 0 |
attr |
EncoderMultiHeadAttention.w_qs |
1 | 0 | 0 |
attr |
EncoderMultiHeadAttention.w_ks |
1 | 0 | 0 |
attr |
EncoderMultiHeadAttention.w_vs |
1 | 0 | 0 |
attr |
EncoderMultiHeadAttention.layer_norm_q |
1 | 0 | 0 |
attr |
EncoderMultiHeadAttention.layer_norm_k |
1 | 0 | 0 |
attr |
EncoderMultiHeadAttention.layer_norm_v |
1 | 0 | 0 |
attr |
EncoderMultiHeadAttention.fc |
1 | 0 | 0 |
meth |
ConformerConvolution.init |
3 | 2 | 0 |
attr |
ConformerConvolution.pre_layer_norm |
1 | 0 | 0 |
attr |
ConformerConvolution.pointwise_conv1 |
1 | 0 | 0 |
attr |
ConformerConvolution.padding |
1 | 0 | 0 |
attr |
ConformerConvolution.depthwise_conv |
1 | 0 | 0 |
attr |
ConformerConvolution.batch_norm |
1 | 0 | 0 |
attr |
ConformerConvolution.swish |
1 | 0 | 0 |
attr |
ConformerConvolution.pointwise_conv2 |
1 | 0 | 0 |
meth |
FireRedASR2Encoder.init |
2 | 1 | 0 |
attr |
FireRedASR2Encoder.audio_encoder |
1 | 0 | 0 |
meth |
FireRedASR2Model.init |
3 | 2 | 0 |
attr |
FireRedASR2Model.encoder |
1 | 0 | 0 |
attr |
FireRedASR2Model.encoder_projector |
1 | 0 | 0 |
attr |
FireRedASR2Model.decoder |
1 | 0 | 0 |
meth |
RelPosMultiHeadAttention.init |
3 | 2 | 0 |
meth |
RelPosMultiHeadAttention._rel_shift |
2 | 0 | 0 |
attr |
RelPosMultiHeadAttention.scale |
1 | 0 | 0 |
attr |
RelPosMultiHeadAttention.linear_pos |
1 | 0 | 0 |
attr |
RelPosMultiHeadAttention.pos_bias_u |
1 | 0 | 0 |
attr |
RelPosMultiHeadAttention.pos_bias_v |
1 | 0 | 0 |
meth |
FireRedASR2Adapter.init |
4 | 3 | 0 |
meth |
FireRedASR2Adapter.forward |
3 | 0 | 0 |
attr |
FireRedASR2Adapter.ds |
1 | 0 | 0 |
attr |
FireRedASR2Adapter.linear1 |
1 | 0 | 0 |
attr |
FireRedASR2Adapter.relu |
1 | 0 | 0 |
attr |
FireRedASR2Adapter.linear2 |
1 | 0 | 0 |
meth |
FireRedASR2ForConditionalGeneration.init |
3 | 2 | 0 |
meth |
FireRedASR2ForConditionalGeneration.forward |
5 | 4 | 0 |
attr |
FireRedASR2ForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
FireRedASR2ForConditionalGeneration.config |
1 | 0 | 0 |
attr |
FireRedASR2ForConditionalGeneration.dtype |
1 | 0 | 0 |
attr |
FireRedASR2ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
FireRedASR2ForConditionalGeneration.logits_processor |
1 | 0 | 0 |
meth |
Conv2dSubsampling.init |
4 | 3 | 0 |
attr |
Conv2dSubsampling.conv |
1 | 0 | 0 |
attr |
Conv2dSubsampling.out |
1 | 0 | 0 |
attr |
Conv2dSubsampling.subsampling |
1 | 0 | 0 |
attr |
Conv2dSubsampling.context |
1 | 0 | 0 |
meth |
RelPositionalEncoding.init |
3 | 2 | 0 |
attr |
RelPositionalEncoding.pe |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
ConformerFeedForward.init |
2 | 1 | 0 |
attr |
ConformerFeedForward.pre_layer_norm |
1 | 0 | 0 |
attr |
ConformerFeedForward.linear_expand |
1 | 0 | 0 |
attr |
ConformerFeedForward.nonlinear |
1 | 0 | 0 |
attr |
ConformerFeedForward.linear_project |
1 | 0 | 0 |
meth |
RelPosEmbConformerBlock.init |
4 | 0 | 0 |
attr |
RelPosEmbConformerBlock.ffn1 |
1 | 0 | 0 |
attr |
RelPosEmbConformerBlock.mhsa |
1 | 0 | 0 |
attr |
RelPosEmbConformerBlock.conv |
1 | 0 | 0 |
attr |
RelPosEmbConformerBlock.ffn2 |
1 | 0 | 0 |
attr |
RelPosEmbConformerBlock.layer_norm |
1 | 0 | 0 |
vllm.model_executor.models.flex_olmo (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
FlexOlmoDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
FlexOlmoDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
FlexOlmoDecoderLayer.post_feedforward_layernorm |
1 | 0 | 0 |
attr |
FlexOlmoDecoderLayer.mlp |
1 | 0 | 0 |
meth |
FlexOlmoMoE.init |
3 | 2 | 0 |
attr |
FlexOlmoMoE.gate |
1 | 0 | 0 |
attr |
FlexOlmoMoE.experts |
1 | 0 | 0 |
attr |
FlexOlmoMoE.top_k |
1 | 0 | 0 |
meth |
FlexOlmoForCausalLM.init |
4 | 3 | 0 |
meth |
FlexOlmoAttention.init |
3 | 2 | 0 |
attr |
FlexOlmoAttention.k_norm |
1 | 0 | 0 |
attr |
FlexOlmoAttention.q_norm |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.funasr (97 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MultiHeadedAttentionSANM.init |
6 | 5 | 0 |
meth |
MultiHeadedAttentionSANM.forward_fsmn |
4 | 3 | 0 |
meth |
MultiHeadedAttentionSANM.forward_qkv |
2 | 1 | 0 |
meth |
MultiHeadedAttentionSANM.forward_attention |
5 | 4 | 0 |
meth |
MultiHeadedAttentionSANM.forward |
5 | 4 | 0 |
attr |
MultiHeadedAttentionSANM.d_k |
1 | 0 | 0 |
attr |
MultiHeadedAttentionSANM.h |
1 | 0 | 0 |
attr |
MultiHeadedAttentionSANM.out_proj |
1 | 0 | 0 |
attr |
MultiHeadedAttentionSANM.linear_q_k_v |
1 | 0 | 0 |
attr |
MultiHeadedAttentionSANM.attn |
1 | 0 | 0 |
attr |
MultiHeadedAttentionSANM.fsmn_block |
1 | 0 | 0 |
attr |
MultiHeadedAttentionSANM.pad_fn |
1 | 0 | 0 |
meth |
FunASRForConditionalGeneration.init |
3 | 2 | 0 |
meth |
FunASRForConditionalGeneration.forward |
5 | 4 | 0 |
attr |
FunASRForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
FunASRForConditionalGeneration.config |
1 | 0 | 0 |
attr |
FunASRForConditionalGeneration.dtype |
1 | 0 | 0 |
attr |
FunASRForConditionalGeneration.model |
1 | 0 | 0 |
attr |
FunASRForConditionalGeneration.logits_processor |
1 | 0 | 0 |
attr |
FunASRForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
LayerNorm.init |
3 | 0 | 0 |
meth |
LayerNorm.forward |
2 | 1 | 0 |
attr |
LayerNorm.dim |
1 | 0 | 0 |
meth |
FunASREncoder.init |
4 | 3 | 0 |
attr |
FunASREncoder.audio_encoder |
1 | 0 | 0 |
attr |
FunASREncoder.audio_adaptor |
1 | 0 | 0 |
meth |
SinusoidalPositionEncoder.init |
2 | 0 | 0 |
meth |
SinusoidalPositionEncoder.encode |
4 | 3 | 0 |
meth |
SinusoidalPositionEncoder.forward |
2 | 1 | 0 |
meth |
EncoderLayer.init |
4 | 3 | 0 |
meth |
EncoderLayer.forward |
2 | 1 | 0 |
attr |
EncoderLayer.self_attn |
1 | 0 | 0 |
attr |
EncoderLayer.feed_forward |
1 | 0 | 0 |
attr |
EncoderLayer.norm1 |
1 | 0 | 0 |
attr |
EncoderLayer.norm2 |
1 | 0 | 0 |
meth |
FunASRAudioAttention.init |
4 | 3 | 0 |
attr |
FunASRAudioAttention.embed_dim |
1 | 0 | 0 |
attr |
FunASRAudioAttention.num_heads |
1 | 0 | 0 |
attr |
FunASRAudioAttention.head_dim |
1 | 0 | 0 |
attr |
FunASRAudioAttention.num_local_heads |
1 | 0 | 0 |
attr |
FunASRAudioAttention.scaling |
1 | 0 | 0 |
attr |
FunASRAudioAttention.qkv |
1 | 0 | 0 |
attr |
FunASRAudioAttention.out_proj |
1 | 0 | 0 |
attr |
FunASRAudioAttention.attn |
1 | 0 | 0 |
meth |
SenseVoiceEncoderSmall.init |
12 | 10 | 0 |
meth |
SenseVoiceEncoderSmall.forward |
3 | 2 | 0 |
attr |
SenseVoiceEncoderSmall.embed |
1 | 0 | 0 |
attr |
SenseVoiceEncoderSmall.normalize_before |
1 | 0 | 0 |
attr |
SenseVoiceEncoderSmall.encoders0 |
1 | 0 | 0 |
attr |
SenseVoiceEncoderSmall.encoders |
1 | 0 | 0 |
attr |
SenseVoiceEncoderSmall.tp_encoders |
1 | 0 | 0 |
attr |
SenseVoiceEncoderSmall.after_norm |
1 | 0 | 0 |
attr |
SenseVoiceEncoderSmall.tp_norm |
1 | 0 | 0 |
meth |
Transformer.init |
7 | 2 | 0 |
meth |
Transformer.forward |
3 | 2 | 0 |
attr |
Transformer.k |
1 | 0 | 0 |
attr |
Transformer.encoder_dim |
1 | 0 | 0 |
attr |
Transformer.llm_dim |
1 | 0 | 0 |
attr |
Transformer.linear1 |
1 | 0 | 0 |
attr |
Transformer.relu |
1 | 0 | 0 |
attr |
Transformer.linear2 |
1 | 0 | 0 |
attr |
Transformer.blocks |
1 | 0 | 0 |
func |
sequence_mask |
5 | 0 | 0 |
meth |
EncoderLayerSANM.init |
6 | 4 | 0 |
meth |
EncoderLayerSANM.forward |
6 | 2 | 0 |
attr |
EncoderLayerSANM.self_attn |
1 | 0 | 0 |
attr |
EncoderLayerSANM.feed_forward |
1 | 0 | 0 |
attr |
EncoderLayerSANM.norm1 |
1 | 0 | 0 |
attr |
EncoderLayerSANM.norm2 |
1 | 0 | 0 |
attr |
EncoderLayerSANM.in_size |
1 | 0 | 0 |
attr |
EncoderLayerSANM.size |
1 | 0 | 0 |
attr |
EncoderLayerSANM.normalize_before |
1 | 0 | 0 |
meth |
PositionwiseFeedForward.init |
3 | 2 | 0 |
meth |
PositionwiseFeedForward.forward |
2 | 1 | 0 |
attr |
PositionwiseFeedForward.w_1 |
1 | 0 | 0 |
attr |
PositionwiseFeedForward.w_2 |
1 | 0 | 0 |
attr |
PositionwiseFeedForward.activation |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
FunASRModel.init |
3 | 2 | 0 |
attr |
FunASRModel.encoder |
1 | 0 | 0 |
attr |
FunASRModel.decoder |
1 | 0 | 0 |
vllm.model_executor.models.funaudiochat (59 missing, 4 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FunAudioChatProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
FunAudioChatDiscreteEncoder.init |
2 | 1 | 1 |
attr |
FunAudioChatDiscreteEncoder.padding_idx |
1 | 0 | 0 |
attr |
FunAudioChatDiscreteEncoder.group_size |
1 | 0 | 0 |
attr |
FunAudioChatDiscreteEncoder.hidden_size |
1 | 0 | 0 |
attr |
FunAudioChatDiscreteEncoder.continuous_features_mode |
1 | 0 | 0 |
attr |
FunAudioChatDiscreteEncoder.embed_tokens |
1 | 0 | 0 |
attr |
FunAudioChatDiscreteEncoder.output_matching |
1 | 0 | 0 |
attr |
FunAudioChatDiscreteEncoder.continual_output_matching |
1 | 0 | 0 |
meth |
FunAudioChatAudioEncoder.init |
2 | 1 | 1 |
attr |
FunAudioChatAudioEncoder.config |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoder.num_mel_bins |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoder.max_source_positions |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoder.embed_scale |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoder.n_window |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoder.conv1 |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoder.conv2 |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoder.layers |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoder.ln_post |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoder.avg_pooler |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoder.proj |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoder.positional_embedding |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoder.audio_bos_eos_token |
1 | 0 | 0 |
meth |
FunAudioChatForConditionalGeneration.init |
3 | 2 | 0 |
attr |
FunAudioChatForConditionalGeneration.config |
1 | 0 | 0 |
attr |
FunAudioChatForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
FunAudioChatForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
FunAudioChatForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
FunAudioChatForConditionalGeneration.continuous_audio_tower |
1 | 0 | 0 |
attr |
FunAudioChatForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
FunAudioChatForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
FunAudioChatAudioAttention.init |
2 | 1 | 1 |
attr |
FunAudioChatAudioAttention.embed_dim |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.total_num_heads |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.dropout |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.head_dim |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.num_key_value_groups |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.config |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.scaling |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.attention_dropout |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.is_decoder |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.is_causal |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.qkv_proj |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.num_heads |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.num_kv_heads |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.q_size |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.kv_size |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.attn |
1 | 0 | 0 |
attr |
FunAudioChatAudioAttention.out_proj |
1 | 0 | 0 |
meth |
FunAudioChatAudioEncoderLayer.init |
2 | 1 | 1 |
attr |
FunAudioChatAudioEncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderLayer.self_attn_layer_norm |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderLayer.dropout |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderLayer.activation_fn |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderLayer.activation_dropout |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderLayer.fc1 |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderLayer.fc2 |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderLayer.final_layer_norm |
1 | 0 | 0 |
vllm.model_executor.models.fuyu (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FuyuProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
FuyuProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
FuyuForCausalLM.init |
3 | 2 | 0 |
meth |
FuyuForCausalLM.forward |
6 | 5 | 0 |
attr |
FuyuForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
FuyuForCausalLM.config |
1 | 0 | 0 |
attr |
FuyuForCausalLM.multimodal_config |
1 | 0 | 0 |
attr |
FuyuForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
FuyuForCausalLM.image_token_id |
1 | 0 | 0 |
attr |
FuyuForCausalLM.image_feature_size |
1 | 0 | 0 |
attr |
FuyuForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
FuyuForCausalLM.vision_embed_tokens |
1 | 0 | 0 |
attr |
FuyuForCausalLM.language_model |
1 | 0 | 0 |
vllm.model_executor.models.gemma (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GemmaForCausalLM.init |
3 | 2 | 0 |
attr |
GemmaForCausalLM.config |
1 | 0 | 0 |
attr |
GemmaForCausalLM.quant_config |
1 | 0 | 0 |
attr |
GemmaForCausalLM.model |
1 | 0 | 0 |
attr |
GemmaForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
GemmaForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
GemmaDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
GemmaDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
GemmaDecoderLayer.mlp |
1 | 0 | 0 |
attr |
GemmaDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
GemmaDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
GemmaMLP.forward |
2 | 0 | 0 |
attr |
GemmaMLP.gate_up_proj |
1 | 0 | 0 |
attr |
GemmaMLP.down_proj |
1 | 0 | 0 |
attr |
GemmaMLP.act_fn |
1 | 0 | 0 |
meth |
GemmaModel.init |
3 | 2 | 0 |
attr |
GemmaModel.config |
1 | 0 | 0 |
attr |
GemmaModel.embed_tokens |
1 | 0 | 0 |
attr |
GemmaModel.norm |
1 | 0 | 0 |
attr |
GemmaModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
GemmaAttention.hidden_size |
1 | 0 | 0 |
attr |
GemmaAttention.total_num_heads |
1 | 0 | 0 |
attr |
GemmaAttention.num_heads |
1 | 0 | 0 |
attr |
GemmaAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
GemmaAttention.num_kv_heads |
1 | 0 | 0 |
attr |
GemmaAttention.head_dim |
1 | 0 | 0 |
attr |
GemmaAttention.q_size |
1 | 0 | 0 |
attr |
GemmaAttention.kv_size |
1 | 0 | 0 |
attr |
GemmaAttention.scaling |
1 | 0 | 0 |
attr |
GemmaAttention.qkv_proj |
1 | 0 | 0 |
attr |
GemmaAttention.o_proj |
1 | 0 | 0 |
attr |
GemmaAttention.rotary_emb |
1 | 0 | 0 |
attr |
GemmaAttention.attn |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.gemma2 (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Gemma2Attention.config |
1 | 0 | 0 |
attr |
Gemma2Attention.hidden_size |
1 | 0 | 0 |
attr |
Gemma2Attention.total_num_heads |
1 | 0 | 0 |
attr |
Gemma2Attention.num_heads |
1 | 0 | 0 |
attr |
Gemma2Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Gemma2Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Gemma2Attention.head_dim |
1 | 0 | 0 |
attr |
Gemma2Attention.q_size |
1 | 0 | 0 |
attr |
Gemma2Attention.kv_size |
1 | 0 | 0 |
attr |
Gemma2Attention.scaling |
1 | 0 | 0 |
attr |
Gemma2Attention.qkv_proj |
1 | 0 | 0 |
attr |
Gemma2Attention.o_proj |
1 | 0 | 0 |
attr |
Gemma2Attention.rotary_emb |
1 | 0 | 0 |
attr |
Gemma2Attention.attn |
1 | 0 | 0 |
attr |
Gemma2MLP.gate_up_proj |
1 | 0 | 0 |
attr |
Gemma2MLP.down_proj |
1 | 0 | 0 |
attr |
Gemma2MLP.act_fn |
1 | 0 | 0 |
meth |
Gemma2Model.init |
3 | 2 | 0 |
attr |
Gemma2Model.config |
1 | 0 | 0 |
attr |
Gemma2Model.quant_config |
1 | 0 | 0 |
attr |
Gemma2Model.embed_tokens |
1 | 0 | 0 |
attr |
Gemma2Model.norm |
1 | 0 | 0 |
attr |
Gemma2Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Gemma2DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Gemma2DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Gemma2DecoderLayer.mlp |
1 | 0 | 0 |
attr |
Gemma2DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Gemma2DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Gemma2DecoderLayer.pre_feedforward_layernorm |
1 | 0 | 0 |
attr |
Gemma2DecoderLayer.post_feedforward_layernorm |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
Gemma2ForCausalLM.init |
3 | 2 | 0 |
attr |
Gemma2ForCausalLM.config |
1 | 0 | 0 |
attr |
Gemma2ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Gemma2ForCausalLM.model |
1 | 0 | 0 |
attr |
Gemma2ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Gemma2ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.gemma3 (45 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma3DecoderLayer.forward |
5 | 4 | 0 |
attr |
Gemma3DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Gemma3DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Gemma3DecoderLayer.mlp |
1 | 0 | 0 |
attr |
Gemma3DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Gemma3DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Gemma3DecoderLayer.pre_feedforward_layernorm |
1 | 0 | 0 |
attr |
Gemma3DecoderLayer.post_feedforward_layernorm |
1 | 0 | 0 |
attr |
Gemma3MLP.gate_up_proj |
1 | 0 | 0 |
attr |
Gemma3MLP.down_proj |
1 | 0 | 0 |
attr |
Gemma3MLP.act_fn |
1 | 0 | 0 |
meth |
Gemma3Attention.forward |
4 | 3 | 0 |
attr |
Gemma3Attention.config |
1 | 0 | 0 |
attr |
Gemma3Attention.hidden_size |
1 | 0 | 0 |
attr |
Gemma3Attention.total_num_heads |
1 | 0 | 0 |
attr |
Gemma3Attention.num_heads |
1 | 0 | 0 |
attr |
Gemma3Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Gemma3Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Gemma3Attention.head_dim |
1 | 0 | 0 |
attr |
Gemma3Attention.q_size |
1 | 0 | 0 |
attr |
Gemma3Attention.kv_size |
1 | 0 | 0 |
attr |
Gemma3Attention.scaling |
1 | 0 | 0 |
attr |
Gemma3Attention.qkv_proj |
1 | 0 | 0 |
attr |
Gemma3Attention.o_proj |
1 | 0 | 0 |
attr |
Gemma3Attention.q_norm |
1 | 0 | 0 |
attr |
Gemma3Attention.k_norm |
1 | 0 | 0 |
attr |
Gemma3Attention.is_sliding |
1 | 0 | 0 |
attr |
Gemma3Attention.rotary_emb |
1 | 0 | 0 |
attr |
Gemma3Attention.attn |
1 | 0 | 0 |
meth |
Gemma3Model.init |
3 | 2 | 0 |
meth |
Gemma3Model.forward |
6 | 5 | 0 |
attr |
Gemma3Model.config |
1 | 0 | 0 |
attr |
Gemma3Model.quant_config |
1 | 0 | 0 |
attr |
Gemma3Model.embed_tokens |
1 | 0 | 0 |
attr |
Gemma3Model.norm |
1 | 0 | 0 |
attr |
Gemma3Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
Gemma3ForCausalLM.init |
3 | 2 | 0 |
meth |
Gemma3ForCausalLM.forward |
6 | 5 | 0 |
attr |
Gemma3ForCausalLM.config |
1 | 0 | 0 |
attr |
Gemma3ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Gemma3ForCausalLM.model |
1 | 0 | 0 |
attr |
Gemma3ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Gemma3ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Gemma3ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.gemma3_mm (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
Gemma3MultiModalProjector.init |
2 | 1 | 0 |
meth |
Gemma3MultiModalProjector.forward |
2 | 1 | 0 |
attr |
Gemma3MultiModalProjector.mm_input_projection_weight |
1 | 0 | 0 |
attr |
Gemma3MultiModalProjector.mm_soft_emb_norm |
1 | 0 | 0 |
attr |
Gemma3MultiModalProjector.patches_per_image |
1 | 0 | 0 |
attr |
Gemma3MultiModalProjector.tokens_per_side |
1 | 0 | 0 |
attr |
Gemma3MultiModalProjector.kernel_size |
1 | 0 | 0 |
attr |
Gemma3MultiModalProjector.avg_pool |
1 | 0 | 0 |
meth |
Gemma3ProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Gemma3ProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
Gemma3ForConditionalGeneration.init |
3 | 2 | 0 |
prop |
Gemma3ForConditionalGeneration.dtype |
1 | 0 | 0 |
attr |
Gemma3ForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Gemma3ForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Gemma3ForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
Gemma3ForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Gemma3ForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Gemma3ForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
Gemma3ForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
Gemma3ForConditionalGeneration.language_model |
1 | 0 | 0 |
vllm.model_executor.models.gemma3n (90 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Gemma3nMLP.gate_up_proj |
1 | 0 | 0 |
attr |
Gemma3nMLP.down_proj |
1 | 0 | 0 |
attr |
Gemma3nMLP.act_fn |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
EPS |
1 | 0 | 0 |
meth |
Gemma3nSelfDecoder.init |
5 | 4 | 0 |
meth |
Gemma3nSelfDecoder.forward |
6 | 5 | 0 |
attr |
Gemma3nSelfDecoder.decoder_layers |
1 | 0 | 0 |
attr |
Gemma3nSelfDecoder.layer_idx_start |
1 | 0 | 0 |
attr |
Gemma3nSelfDecoder.config |
1 | 0 | 0 |
attr |
Gemma3nSelfDecoder.embed_tokens |
1 | 0 | 0 |
attr |
Gemma3nSelfDecoder.embed_scale |
1 | 0 | 0 |
attr |
Gemma3nSelfDecoder.embed_tokens_per_layer |
1 | 0 | 0 |
attr |
Gemma3nSelfDecoder.embed_scale_per_layer |
1 | 0 | 0 |
attr |
Gemma3nSelfDecoder.per_layer_model_projection |
1 | 0 | 0 |
attr |
Gemma3nSelfDecoder.per_layer_projection_norm |
1 | 0 | 0 |
attr |
Gemma3nSelfDecoder.per_layer_input_scale |
1 | 0 | 0 |
attr |
Gemma3nSelfDecoder.per_layer_projection_scale |
1 | 0 | 0 |
attr |
Gemma3nSelfDecoder.altup_projections |
1 | 0 | 0 |
attr |
Gemma3nLaurelBlock.linear_left |
1 | 0 | 0 |
attr |
Gemma3nLaurelBlock.linear_right |
1 | 0 | 0 |
attr |
Gemma3nLaurelBlock.post_laurel_norm |
1 | 0 | 0 |
meth |
Gemma3nAltUp.init |
8 | 7 | 0 |
attr |
Gemma3nAltUp.altup_num_inputs |
1 | 0 | 0 |
attr |
Gemma3nAltUp.altup_active_idx |
1 | 0 | 0 |
attr |
Gemma3nAltUp.altup_coef_clip |
1 | 0 | 0 |
attr |
Gemma3nAltUp.correction_coefs |
1 | 0 | 0 |
attr |
Gemma3nAltUp.prediction_coefs |
1 | 0 | 0 |
attr |
Gemma3nAltUp.modality_router |
1 | 0 | 0 |
attr |
Gemma3nAltUp.router_norm |
1 | 0 | 0 |
attr |
Gemma3nAltUp.router_input_scale |
1 | 0 | 0 |
attr |
Gemma3nAltUp.correct_output_scale |
1 | 0 | 0 |
meth |
Gemma3nAttention.forward |
4 | 3 | 0 |
attr |
Gemma3nAttention.config |
1 | 0 | 0 |
attr |
Gemma3nAttention.hidden_size |
1 | 0 | 0 |
attr |
Gemma3nAttention.total_num_heads |
1 | 0 | 0 |
attr |
Gemma3nAttention.num_heads |
1 | 0 | 0 |
attr |
Gemma3nAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Gemma3nAttention.num_kv_heads |
1 | 0 | 0 |
attr |
Gemma3nAttention.head_dim |
1 | 0 | 0 |
attr |
Gemma3nAttention.q_size |
1 | 0 | 0 |
attr |
Gemma3nAttention.kv_size |
1 | 0 | 0 |
attr |
Gemma3nAttention.qkv_proj |
1 | 0 | 0 |
attr |
Gemma3nAttention.o_proj |
1 | 0 | 0 |
attr |
Gemma3nAttention.q_norm |
1 | 0 | 0 |
attr |
Gemma3nAttention.k_norm |
1 | 0 | 0 |
attr |
Gemma3nAttention.v_norm |
1 | 0 | 0 |
attr |
Gemma3nAttention.sliding_window |
1 | 0 | 0 |
attr |
Gemma3nAttention.is_kv_shared |
1 | 0 | 0 |
attr |
Gemma3nAttention.rotary_emb |
1 | 0 | 0 |
attr |
Gemma3nAttention.attn |
1 | 0 | 0 |
meth |
Gemma3nTextModel.init |
3 | 2 | 0 |
meth |
Gemma3nTextModel.fast_prefill_forward |
6 | 5 | 0 |
meth |
Gemma3nTextModel.normal_forward |
6 | 5 | 0 |
meth |
Gemma3nTextModel.forward |
7 | 6 | 0 |
prop |
Gemma3nTextModel.embed_tokens |
1 | 0 | 0 |
attr |
Gemma3nTextModel.config |
1 | 0 | 0 |
attr |
Gemma3nTextModel.quant_config |
1 | 0 | 0 |
attr |
Gemma3nTextModel.altup_unembed_projections |
1 | 0 | 0 |
attr |
Gemma3nTextModel.norm |
1 | 0 | 0 |
attr |
Gemma3nTextModel.fast_prefill_enabled |
1 | 0 | 0 |
attr |
Gemma3nTextModel.self_decoder |
1 | 0 | 0 |
attr |
Gemma3nTextModel.cross_decoder |
1 | 0 | 0 |
attr |
Gemma3nTextModel.positions |
1 | 0 | 0 |
attr |
Gemma3nTextModel.hidden_states |
1 | 0 | 0 |
attr |
Gemma3nTextModel.per_layer_inputs |
1 | 0 | 0 |
meth |
Gemma3nForCausalLM.init |
3 | 2 | 0 |
meth |
Gemma3nForCausalLM.forward |
7 | 6 | 0 |
attr |
Gemma3nForCausalLM.config |
1 | 0 | 0 |
attr |
Gemma3nForCausalLM.cache_config |
1 | 0 | 0 |
attr |
Gemma3nForCausalLM.model |
1 | 0 | 0 |
attr |
Gemma3nForCausalLM.logits_processor |
1 | 0 | 0 |
meth |
Gemma3nCrossDecoder.init |
5 | 4 | 0 |
meth |
Gemma3nCrossDecoder.forward |
5 | 4 | 0 |
attr |
Gemma3nCrossDecoder.decoder_layers |
1 | 0 | 0 |
attr |
Gemma3nCrossDecoder.layer_idx_start |
1 | 0 | 0 |
meth |
Gemma3nDecoderLayer.forward |
5 | 4 | 0 |
attr |
Gemma3nDecoderLayer.altup_active_idx |
1 | 0 | 0 |
attr |
Gemma3nDecoderLayer.altup |
1 | 0 | 0 |
attr |
Gemma3nDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Gemma3nDecoderLayer.mlp |
1 | 0 | 0 |
attr |
Gemma3nDecoderLayer.laurel |
1 | 0 | 0 |
attr |
Gemma3nDecoderLayer.per_layer_input_gate |
1 | 0 | 0 |
attr |
Gemma3nDecoderLayer.per_layer_projection |
1 | 0 | 0 |
attr |
Gemma3nDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Gemma3nDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Gemma3nDecoderLayer.pre_feedforward_layernorm |
1 | 0 | 0 |
attr |
Gemma3nDecoderLayer.post_feedforward_layernorm |
1 | 0 | 0 |
attr |
Gemma3nDecoderLayer.post_per_layer_input_norm |
1 | 0 | 0 |
attr |
Gemma3nDecoderLayer.act_fn |
1 | 0 | 0 |
vllm.model_executor.models.gemma3n_mm (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma3nMultimodalEmbedder.init |
3 | 2 | 0 |
attr |
Gemma3nMultimodalEmbedder.multimodal_hidden_size |
1 | 0 | 0 |
attr |
Gemma3nMultimodalEmbedder.eps |
1 | 0 | 0 |
attr |
Gemma3nMultimodalEmbedder.vocab_offset |
1 | 0 | 0 |
attr |
Gemma3nMultimodalEmbedder.vocab_size |
1 | 0 | 0 |
attr |
Gemma3nMultimodalEmbedder.text_hidden_size |
1 | 0 | 0 |
attr |
Gemma3nMultimodalEmbedder.embedding |
1 | 0 | 0 |
attr |
Gemma3nMultimodalEmbedder.hard_embedding_norm |
1 | 0 | 0 |
attr |
Gemma3nMultimodalEmbedder.soft_embedding_norm |
1 | 0 | 0 |
attr |
Gemma3nMultimodalEmbedder.embedding_projection |
1 | 0 | 0 |
attr |
Gemma3nMultimodalEmbedder.embedding_post_projection_norm |
1 | 0 | 0 |
meth |
Gemma3nForConditionalGeneration.init |
3 | 2 | 0 |
attr |
Gemma3nForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Gemma3nForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Gemma3nForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
Gemma3nForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Gemma3nForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
Gemma3nForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
Gemma3nForConditionalGeneration.embed_vision |
1 | 0 | 0 |
attr |
Gemma3nForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
Gemma3nForConditionalGeneration.embed_audio |
1 | 0 | 0 |
attr |
Gemma3nForConditionalGeneration.per_layer_embeddings |
1 | 0 | 0 |
meth |
Gemma3nProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Gemma3nProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
Gemma3nProcessingInfo.get_data_parser |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.glm (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmForCausalLM.init |
3 | 2 | 0 |
vllm.model_executor.models.glm4 (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Glm4DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Glm4DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Glm4DecoderLayer.mlp |
1 | 0 | 0 |
attr |
Glm4DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Glm4DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Glm4DecoderLayer.post_self_attn_layernorm |
1 | 0 | 0 |
attr |
Glm4DecoderLayer.post_mlp_layernorm |
1 | 0 | 0 |
attr |
Glm4Attention.hidden_size |
1 | 0 | 0 |
attr |
Glm4Attention.total_num_heads |
1 | 0 | 0 |
attr |
Glm4Attention.num_heads |
1 | 0 | 0 |
attr |
Glm4Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Glm4Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Glm4Attention.head_dim |
1 | 0 | 0 |
attr |
Glm4Attention.q_size |
1 | 0 | 0 |
attr |
Glm4Attention.kv_size |
1 | 0 | 0 |
attr |
Glm4Attention.scaling |
1 | 0 | 0 |
attr |
Glm4Attention.qkv_proj |
1 | 0 | 0 |
attr |
Glm4Attention.o_proj |
1 | 0 | 0 |
attr |
Glm4Attention.rotary_emb |
1 | 0 | 0 |
attr |
Glm4Attention.attn |
1 | 0 | 0 |
meth |
Glm4Model.init |
3 | 2 | 0 |
meth |
Glm4ForCausalLM.init |
3 | 2 | 0 |
attr |
Glm4ForCausalLM.config |
1 | 0 | 0 |
attr |
Glm4ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Glm4ForCausalLM.model |
1 | 0 | 0 |
attr |
Glm4ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Glm4ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Glm4ForCausalLM.lm_head |
1 | 0 | 0 |
vllm.model_executor.models.glm4_1v (68 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Glm4vVisionTransformer.hidden_size |
1 | 0 | 0 |
attr |
Glm4vVisionTransformer.num_heads |
1 | 0 | 0 |
attr |
Glm4vVisionTransformer.patch_size |
1 | 0 | 0 |
attr |
Glm4vVisionTransformer.spatial_merge_size |
1 | 0 | 0 |
attr |
Glm4vVisionTransformer.out_hidden_size |
1 | 0 | 0 |
attr |
Glm4vVisionTransformer.patch_embed |
1 | 0 | 0 |
attr |
Glm4vVisionTransformer.rotary_pos_emb |
1 | 0 | 0 |
attr |
Glm4vVisionTransformer.blocks |
1 | 0 | 0 |
attr |
Glm4vVisionTransformer.merger |
1 | 0 | 0 |
attr |
Glm4vVisionTransformer.embeddings |
1 | 0 | 0 |
attr |
Glm4vVisionTransformer.post_conv_layernorm |
1 | 0 | 0 |
attr |
Glm4vVisionTransformer.downsample |
1 | 0 | 0 |
attr |
Glm4vVisionTransformer.post_layernorm |
1 | 0 | 0 |
attr |
Glm4vVisionTransformer.attn_backend |
1 | 0 | 0 |
meth |
Glm4vPatchMerger.forward |
2 | 1 | 0 |
attr |
Glm4vPatchMerger.hidden_size |
1 | 0 | 0 |
attr |
Glm4vPatchMerger.proj |
1 | 0 | 0 |
attr |
Glm4vPatchMerger.post_projection_norm |
1 | 0 | 0 |
attr |
Glm4vPatchMerger.gate_up_proj |
1 | 0 | 0 |
attr |
Glm4vPatchMerger.down_proj |
1 | 0 | 0 |
attr |
Glm4vPatchMerger.act_fn |
1 | 0 | 0 |
attr |
Glm4vPatchMerger.extra_activation_func |
1 | 0 | 0 |
func |
all_gather_interleave |
4 | 2 | 0 |
meth |
Glm4vVisionEmbeddings.init |
2 | 1 | 0 |
meth |
Glm4vVisionEmbeddings.forward |
6 | 1 | 0 |
attr |
Glm4vVisionEmbeddings.config |
1 | 0 | 0 |
attr |
Glm4vVisionEmbeddings.embed_dim |
1 | 0 | 0 |
attr |
Glm4vVisionEmbeddings.image_size |
1 | 0 | 0 |
attr |
Glm4vVisionEmbeddings.patch_size |
1 | 0 | 0 |
attr |
Glm4vVisionEmbeddings.num_patches |
1 | 0 | 0 |
attr |
Glm4vVisionEmbeddings.num_positions |
1 | 0 | 0 |
attr |
Glm4vVisionEmbeddings.position_embedding |
1 | 0 | 0 |
meth |
Glm4vForConditionalGeneration.init |
3 | 2 | 0 |
attr |
Glm4vForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Glm4vForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Glm4vForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Glm4vForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
Glm4vForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Glm4vForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
Glm4vForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
Glm4vVisionAttention.tp_size |
1 | 0 | 0 |
attr |
Glm4vVisionAttention.tp_rank |
1 | 0 | 0 |
attr |
Glm4vVisionAttention.hidden_size_per_attention_head |
1 | 0 | 0 |
attr |
Glm4vVisionAttention.num_attention_heads_per_partition |
1 | 0 | 0 |
attr |
Glm4vVisionAttention.qkv |
1 | 0 | 0 |
attr |
Glm4vVisionAttention.proj |
1 | 0 | 0 |
attr |
Glm4vVisionAttention.attn |
1 | 0 | 0 |
attr |
Glm4vVisionAttention.apply_rotary_emb |
1 | 0 | 0 |
meth |
Glm4vVisionMLP.init |
6 | 5 | 0 |
meth |
Glm4vVisionMLP.forward |
2 | 1 | 0 |
attr |
Glm4vVisionMLP.gate_up_proj |
1 | 0 | 0 |
attr |
Glm4vVisionMLP.down_proj |
1 | 0 | 0 |
attr |
Glm4vVisionMLP.act_fn |
1 | 0 | 0 |
attr |
Glm4vVisionPatchEmbed.patch_size |
1 | 0 | 0 |
attr |
Glm4vVisionPatchEmbed.temporal_patch_size |
1 | 0 | 0 |
attr |
Glm4vVisionPatchEmbed.hidden_size |
1 | 0 | 0 |
attr |
Glm4vVisionPatchEmbed.proj |
1 | 0 | 0 |
meth |
Glm4vProcessingInfo.get_data_parser |
1 | 0 | 0 |
attr |
Glm4vVisionBlock.norm1 |
1 | 0 | 0 |
attr |
Glm4vVisionBlock.norm2 |
1 | 0 | 0 |
attr |
Glm4vVisionBlock.attn |
1 | 0 | 0 |
attr |
Glm4vVisionBlock.mlp |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.glm4_moe (63 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4MoeForCausalLM.init |
3 | 2 | 0 |
attr |
Glm4MoeForCausalLM.config |
1 | 0 | 0 |
attr |
Glm4MoeForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Glm4MoeForCausalLM.model |
1 | 0 | 0 |
attr |
Glm4MoeForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Glm4MoeForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Glm4MoeForCausalLM.expert_weights |
1 | 0 | 0 |
attr |
Glm4MoeForCausalLM.num_moe_layers |
1 | 0 | 0 |
attr |
Glm4MoeForCausalLM.num_expert_groups |
1 | 0 | 0 |
attr |
Glm4MoeForCausalLM.moe_layers |
1 | 0 | 0 |
attr |
Glm4MoeForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Glm4MoeMLP.forward |
2 | 0 | 0 |
attr |
Glm4MoeMLP.gate_up_proj |
1 | 0 | 0 |
attr |
Glm4MoeMLP.down_proj |
1 | 0 | 0 |
attr |
Glm4MoeMLP.act_fn |
1 | 0 | 0 |
meth |
Glm4MoE.init |
5 | 4 | 0 |
attr |
Glm4MoE.tp_size |
1 | 0 | 0 |
attr |
Glm4MoE.routed_scaling_factor |
1 | 0 | 0 |
attr |
Glm4MoE.ep_group |
1 | 0 | 0 |
attr |
Glm4MoE.ep_rank |
1 | 0 | 0 |
attr |
Glm4MoE.ep_size |
1 | 0 | 0 |
attr |
Glm4MoE.gate |
1 | 0 | 0 |
attr |
Glm4MoE.enable_eplb |
1 | 0 | 0 |
attr |
Glm4MoE.n_redundant_experts |
1 | 0 | 0 |
attr |
Glm4MoE.n_logical_experts |
1 | 0 | 0 |
attr |
Glm4MoE.n_physical_experts |
1 | 0 | 0 |
attr |
Glm4MoE.n_local_physical_experts |
1 | 0 | 0 |
attr |
Glm4MoE.physical_expert_start |
1 | 0 | 0 |
attr |
Glm4MoE.physical_expert_end |
1 | 0 | 0 |
attr |
Glm4MoE.experts |
1 | 0 | 0 |
attr |
Glm4MoE.shared_experts |
1 | 0 | 0 |
attr |
Glm4MoeDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Glm4MoeDecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
Glm4MoeDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Glm4MoeDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Glm4MoeDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Glm4MoeDecoderLayer.routed_scaling_factor |
1 | 0 | 0 |
attr |
Glm4MoeDecoderLayer.mlp |
1 | 0 | 0 |
attr |
Glm4MoeAttention.hidden_size |
1 | 0 | 0 |
attr |
Glm4MoeAttention.total_num_heads |
1 | 0 | 0 |
attr |
Glm4MoeAttention.num_heads |
1 | 0 | 0 |
attr |
Glm4MoeAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Glm4MoeAttention.num_kv_heads |
1 | 0 | 0 |
attr |
Glm4MoeAttention.head_dim |
1 | 0 | 0 |
attr |
Glm4MoeAttention.q_size |
1 | 0 | 0 |
attr |
Glm4MoeAttention.kv_size |
1 | 0 | 0 |
attr |
Glm4MoeAttention.scaling |
1 | 0 | 0 |
attr |
Glm4MoeAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
Glm4MoeAttention.use_qk_norm |
1 | 0 | 0 |
attr |
Glm4MoeAttention.qkv_proj |
1 | 0 | 0 |
attr |
Glm4MoeAttention.o_proj |
1 | 0 | 0 |
attr |
Glm4MoeAttention.rotary_emb |
1 | 0 | 0 |
attr |
Glm4MoeAttention.attn |
1 | 0 | 0 |
attr |
Glm4MoeAttention.q_norm |
1 | 0 | 0 |
attr |
Glm4MoeAttention.k_norm |
1 | 0 | 0 |
meth |
Glm4MoeModel.init |
3 | 2 | 0 |
attr |
Glm4MoeModel.config |
1 | 0 | 0 |
attr |
Glm4MoeModel.vocab_size |
1 | 0 | 0 |
attr |
Glm4MoeModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Glm4MoeModel.embed_tokens |
1 | 0 | 0 |
attr |
Glm4MoeModel.norm |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.glm4_moe_lite (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Glm4MoeLiteDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Glm4MoeLiteDecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
Glm4MoeLiteDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Glm4MoeLiteDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Glm4MoeLiteDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Glm4MoeLiteDecoderLayer.routed_scaling_factor |
1 | 0 | 0 |
attr |
Glm4MoeLiteDecoderLayer.mlp |
1 | 0 | 0 |
meth |
Glm4MoeLiteForCausalLM.init |
3 | 2 | 0 |
meth |
Glm4MoeLiteForCausalLM.set_moe_parameters |
1 | 0 | 0 |
attr |
Glm4MoeLiteForCausalLM.config |
1 | 0 | 0 |
attr |
Glm4MoeLiteForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Glm4MoeLiteForCausalLM.use_mha |
1 | 0 | 0 |
attr |
Glm4MoeLiteForCausalLM.fuse_qkv_a_proj |
1 | 0 | 0 |
attr |
Glm4MoeLiteForCausalLM.model |
1 | 0 | 0 |
attr |
Glm4MoeLiteForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Glm4MoeLiteForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Glm4MoeLiteForCausalLM.num_moe_layers |
1 | 0 | 0 |
attr |
Glm4MoeLiteForCausalLM.lm_head |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
Glm4MoeLiteModel.init |
3 | 2 | 0 |
attr |
Glm4MoeLiteModel.config |
1 | 0 | 0 |
attr |
Glm4MoeLiteModel.device |
1 | 0 | 0 |
attr |
Glm4MoeLiteModel.vocab_size |
1 | 0 | 0 |
attr |
Glm4MoeLiteModel.is_v32 |
1 | 0 | 0 |
attr |
Glm4MoeLiteModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Glm4MoeLiteModel.embed_tokens |
1 | 0 | 0 |
attr |
Glm4MoeLiteModel.norm |
1 | 0 | 0 |
vllm.model_executor.models.glm4_moe_lite_mtp (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
SharedHead.norm |
1 | 0 | 0 |
attr |
SharedHead.head |
1 | 0 | 0 |
meth |
Glm4MoeLiteMultiTokenPredictor.init |
3 | 2 | 0 |
attr |
Glm4MoeLiteMultiTokenPredictor.mtp_start_layer_idx |
1 | 0 | 0 |
attr |
Glm4MoeLiteMultiTokenPredictor.num_mtp_layers |
1 | 0 | 0 |
attr |
Glm4MoeLiteMultiTokenPredictor.layers |
1 | 0 | 0 |
attr |
Glm4MoeLiteMultiTokenPredictor.embed_tokens |
1 | 0 | 0 |
attr |
Glm4MoeLiteMultiTokenPredictor.logits_processor |
1 | 0 | 0 |
meth |
Glm4MoeLiteMTP.init |
3 | 2 | 0 |
attr |
Glm4MoeLiteMTP.config |
1 | 0 | 0 |
attr |
Glm4MoeLiteMTP.model |
1 | 0 | 0 |
attr |
Glm4MoeLiteMTP.expert_weights |
1 | 0 | 0 |
attr |
Glm4MoeLiteMTP.num_moe_layers |
1 | 0 | 0 |
attr |
Glm4MoeLiteMTP.num_expert_groups |
1 | 0 | 0 |
meth |
Glm4MoeLiteMultiTokenPredictorLayer.init |
3 | 2 | 0 |
attr |
Glm4MoeLiteMultiTokenPredictorLayer.config |
1 | 0 | 0 |
attr |
Glm4MoeLiteMultiTokenPredictorLayer.enorm |
1 | 0 | 0 |
attr |
Glm4MoeLiteMultiTokenPredictorLayer.hnorm |
1 | 0 | 0 |
attr |
Glm4MoeLiteMultiTokenPredictorLayer.eh_proj |
1 | 0 | 0 |
attr |
Glm4MoeLiteMultiTokenPredictorLayer.device |
1 | 0 | 0 |
attr |
Glm4MoeLiteMultiTokenPredictorLayer.is_v32 |
1 | 0 | 0 |
attr |
Glm4MoeLiteMultiTokenPredictorLayer.shared_head |
1 | 0 | 0 |
attr |
Glm4MoeLiteMultiTokenPredictorLayer.mtp_block |
1 | 0 | 0 |
vllm.model_executor.models.glm4_moe_mtp (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
SharedHead.norm |
1 | 0 | 0 |
attr |
SharedHead.head |
1 | 0 | 0 |
attr |
Glm4MoeMultiTokenPredictorLayer.enorm |
1 | 0 | 0 |
attr |
Glm4MoeMultiTokenPredictorLayer.hnorm |
1 | 0 | 0 |
attr |
Glm4MoeMultiTokenPredictorLayer.eh_proj |
1 | 0 | 0 |
attr |
Glm4MoeMultiTokenPredictorLayer.shared_head |
1 | 0 | 0 |
attr |
Glm4MoeMultiTokenPredictorLayer.enable_eplb |
1 | 0 | 0 |
attr |
Glm4MoeMultiTokenPredictorLayer.mtp_block |
1 | 0 | 0 |
meth |
Glm4MoeMultiTokenPredictor.init |
3 | 2 | 0 |
attr |
Glm4MoeMultiTokenPredictor.mtp_start_layer_idx |
1 | 0 | 0 |
attr |
Glm4MoeMultiTokenPredictor.num_mtp_layers |
1 | 0 | 0 |
attr |
Glm4MoeMultiTokenPredictor.layers |
1 | 0 | 0 |
attr |
Glm4MoeMultiTokenPredictor.embed_tokens |
1 | 0 | 0 |
attr |
Glm4MoeMultiTokenPredictor.logits_processor |
1 | 0 | 0 |
meth |
Glm4MoeMTP.init |
3 | 2 | 0 |
attr |
Glm4MoeMTP.config |
1 | 0 | 0 |
attr |
Glm4MoeMTP.model |
1 | 0 | 0 |
attr |
Glm4MoeMTP.expert_weights |
1 | 0 | 0 |
attr |
Glm4MoeMTP.num_moe_layers |
1 | 0 | 0 |
attr |
Glm4MoeMTP.num_expert_groups |
1 | 0 | 0 |
vllm.model_executor.models.glm4v (61 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EVA2CLIPTransformer.init |
4 | 2 | 0 |
meth |
EVA2CLIPTransformer.forward |
2 | 0 | 0 |
attr |
EVA2CLIPTransformer.layers |
1 | 0 | 0 |
meth |
EVA2CLIPModel.init |
4 | 2 | 0 |
attr |
EVA2CLIPModel.patch_embedding |
1 | 0 | 0 |
attr |
EVA2CLIPModel.transformer |
1 | 0 | 0 |
attr |
EVA2CLIPModel.linear_proj |
1 | 0 | 0 |
attr |
EVA2CLIPModel.conv |
1 | 0 | 0 |
attr |
EVA2CLIPModel.boi |
1 | 0 | 0 |
attr |
EVA2CLIPModel.eoi |
1 | 0 | 0 |
attr |
EVA2CLIPModel.scaling_factor |
1 | 0 | 0 |
meth |
EVA2CLIPTransformerLayer.init |
4 | 2 | 0 |
meth |
EVA2CLIPTransformerLayer.forward |
2 | 0 | 0 |
attr |
EVA2CLIPTransformerLayer.input_layernorm |
1 | 0 | 0 |
attr |
EVA2CLIPTransformerLayer.attention |
1 | 0 | 0 |
attr |
EVA2CLIPTransformerLayer.mlp |
1 | 0 | 0 |
attr |
EVA2CLIPTransformerLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
GLM4VModel.init |
3 | 2 | 0 |
attr |
GLM4VModel.vision |
1 | 0 | 0 |
meth |
EVA2CLIPPatchEmbedding.init |
2 | 0 | 0 |
attr |
EVA2CLIPPatchEmbedding.proj |
1 | 0 | 0 |
attr |
EVA2CLIPPatchEmbedding.cls_embedding |
1 | 0 | 0 |
attr |
EVA2CLIPPatchEmbedding.position_embedding |
1 | 0 | 0 |
meth |
GLM4VProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
EVA2CLIPAttention.init |
4 | 2 | 0 |
attr |
EVA2CLIPAttention.hidden_size |
1 | 0 | 0 |
attr |
EVA2CLIPAttention.tp_size |
1 | 0 | 0 |
attr |
EVA2CLIPAttention.num_heads_per_rank |
1 | 0 | 0 |
attr |
EVA2CLIPAttention.head_dim |
1 | 0 | 0 |
attr |
EVA2CLIPAttention.scale |
1 | 0 | 0 |
attr |
EVA2CLIPAttention.query_key_value |
1 | 0 | 0 |
attr |
EVA2CLIPAttention.dense |
1 | 0 | 0 |
attr |
EVA2CLIPAttention.attn |
1 | 0 | 0 |
attr |
EVA2CLIPAttention.output_dropout |
1 | 0 | 0 |
attr |
GLM4VProcessor.config |
1 | 0 | 0 |
attr |
GLM4VProcessor.tokenizer |
1 | 0 | 0 |
attr |
GLM4VProcessor.image_transform |
1 | 0 | 0 |
meth |
EVA2CLIPMLP.init |
4 | 2 | 0 |
attr |
EVA2CLIPMLP.config |
1 | 0 | 0 |
attr |
EVA2CLIPMLP.activation_fn |
1 | 0 | 0 |
attr |
EVA2CLIPMLP.fc1 |
1 | 0 | 0 |
attr |
EVA2CLIPMLP.fc2 |
1 | 0 | 0 |
meth |
EVA2CLIPGLU.init |
5 | 2 | 0 |
meth |
EVA2CLIPGLU.forward |
2 | 0 | 0 |
attr |
EVA2CLIPGLU.linear_proj |
1 | 0 | 0 |
attr |
EVA2CLIPGLU.norm1 |
1 | 0 | 0 |
attr |
EVA2CLIPGLU.act1 |
1 | 0 | 0 |
attr |
EVA2CLIPGLU.act2 |
1 | 0 | 0 |
attr |
EVA2CLIPGLU.merged_proj |
1 | 0 | 0 |
attr |
EVA2CLIPGLU.dense_4h_to_h |
1 | 0 | 0 |
vllm.model_executor.models.glm_ocr (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
GlmOcrVisionBlock.norm1 |
1 | 0 | 0 |
attr |
GlmOcrVisionBlock.norm2 |
1 | 0 | 0 |
attr |
GlmOcrVisionBlock.attn |
1 | 0 | 0 |
attr |
GlmOcrVisionBlock.mlp |
1 | 0 | 0 |
attr |
GlmOcrVisionTransformer.hidden_size |
1 | 0 | 0 |
attr |
GlmOcrVisionTransformer.num_heads |
1 | 0 | 0 |
attr |
GlmOcrVisionTransformer.patch_size |
1 | 0 | 0 |
attr |
GlmOcrVisionTransformer.spatial_merge_size |
1 | 0 | 0 |
attr |
GlmOcrVisionTransformer.out_hidden_size |
1 | 0 | 0 |
attr |
GlmOcrVisionTransformer.patch_embed |
1 | 0 | 0 |
attr |
GlmOcrVisionTransformer.rotary_pos_emb |
1 | 0 | 0 |
attr |
GlmOcrVisionTransformer.blocks |
1 | 0 | 0 |
attr |
GlmOcrVisionTransformer.merger |
1 | 0 | 0 |
attr |
GlmOcrVisionTransformer.downsample |
1 | 0 | 0 |
attr |
GlmOcrVisionTransformer.post_layernorm |
1 | 0 | 0 |
attr |
GlmOcrVisionTransformer.attn_backend |
1 | 0 | 0 |
meth |
GlmOcrForConditionalGeneration.init |
3 | 2 | 0 |
attr |
GlmOcrForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
GlmOcrVisionAttention.tp_size |
1 | 0 | 0 |
attr |
GlmOcrVisionAttention.tp_rank |
1 | 0 | 0 |
attr |
GlmOcrVisionAttention.hidden_size_per_attention_head |
1 | 0 | 0 |
attr |
GlmOcrVisionAttention.num_attention_heads_per_partition |
1 | 0 | 0 |
attr |
GlmOcrVisionAttention.head_dim |
1 | 0 | 0 |
attr |
GlmOcrVisionAttention.q_norm |
1 | 0 | 0 |
attr |
GlmOcrVisionAttention.k_norm |
1 | 0 | 0 |
attr |
GlmOcrVisionAttention.qkv |
1 | 0 | 0 |
attr |
GlmOcrVisionAttention.proj |
1 | 0 | 0 |
attr |
GlmOcrVisionAttention.attn |
1 | 0 | 0 |
attr |
GlmOcrVisionAttention.apply_rotary_emb |
1 | 0 | 0 |
vllm.model_executor.models.glm_ocr_mtp (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmOcrMTP.init |
3 | 2 | 0 |
attr |
GlmOcrMTP.config |
1 | 0 | 0 |
attr |
GlmOcrMTP.quant_config |
1 | 0 | 0 |
attr |
GlmOcrMTP.model |
1 | 0 | 0 |
attr |
GlmOcrMTP.expert_weights |
1 | 0 | 0 |
attr |
GlmOcrMTP.num_layers |
1 | 0 | 0 |
meth |
GlmOcrMultiTokenPredictorLayer.init |
3 | 2 | 0 |
attr |
GlmOcrMultiTokenPredictorLayer.config |
1 | 0 | 0 |
attr |
GlmOcrMultiTokenPredictorLayer.enorm |
1 | 0 | 0 |
attr |
GlmOcrMultiTokenPredictorLayer.hnorm |
1 | 0 | 0 |
attr |
GlmOcrMultiTokenPredictorLayer.eh_proj |
1 | 0 | 0 |
attr |
GlmOcrMultiTokenPredictorLayer.device |
1 | 0 | 0 |
attr |
GlmOcrMultiTokenPredictorLayer.shared_head |
1 | 0 | 0 |
attr |
GlmOcrMultiTokenPredictorLayer.mtp_block |
1 | 0 | 0 |
meth |
GlmOcrMultiTokenPredictor.init |
3 | 2 | 0 |
attr |
GlmOcrMultiTokenPredictor.mtp_start_layer_idx |
1 | 0 | 0 |
attr |
GlmOcrMultiTokenPredictor.num_mtp_layers |
1 | 0 | 0 |
attr |
GlmOcrMultiTokenPredictor.layers |
1 | 0 | 0 |
attr |
GlmOcrMultiTokenPredictor.embed_tokens |
1 | 0 | 0 |
attr |
GlmOcrMultiTokenPredictor.logits_processor |
1 | 0 | 0 |
vllm.model_executor.models.glmasr (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmAsrEncoderLayer.init |
4 | 2 | 0 |
attr |
GlmAsrEncoderLayer.hidden_size |
1 | 0 | 0 |
attr |
GlmAsrEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
GlmAsrEncoderLayer.mlp |
1 | 0 | 0 |
attr |
GlmAsrEncoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
GlmAsrEncoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
GlmAsrEncoderAttention.init |
4 | 2 | 0 |
attr |
GlmAsrEncoderAttention.config |
1 | 0 | 0 |
attr |
GlmAsrEncoderAttention.hidden_size |
1 | 0 | 0 |
attr |
GlmAsrEncoderAttention.num_heads |
1 | 0 | 0 |
attr |
GlmAsrEncoderAttention.num_kv_heads |
1 | 0 | 0 |
attr |
GlmAsrEncoderAttention.head_dim |
1 | 0 | 0 |
attr |
GlmAsrEncoderAttention.tp_size |
1 | 0 | 0 |
attr |
GlmAsrEncoderAttention.num_heads_per_rank |
1 | 0 | 0 |
attr |
GlmAsrEncoderAttention.num_kv_heads_per_rank |
1 | 0 | 0 |
attr |
GlmAsrEncoderAttention.qkv_proj |
1 | 0 | 0 |
attr |
GlmAsrEncoderAttention.o_proj |
1 | 0 | 0 |
attr |
GlmAsrEncoderAttention.rotary_dim |
1 | 0 | 0 |
attr |
GlmAsrEncoderAttention.apply_rotary_emb |
1 | 0 | 0 |
attr |
GlmAsrEncoderAttention.attn |
1 | 0 | 0 |
meth |
GlmAsrEncoderMLP.init |
4 | 2 | 0 |
attr |
GlmAsrEncoderMLP.config |
1 | 0 | 0 |
attr |
GlmAsrEncoderMLP.hidden_size |
1 | 0 | 0 |
attr |
GlmAsrEncoderMLP.intermediate_size |
1 | 0 | 0 |
attr |
GlmAsrEncoderMLP.fc1 |
1 | 0 | 0 |
attr |
GlmAsrEncoderMLP.act_fn |
1 | 0 | 0 |
attr |
GlmAsrEncoderMLP.fc2 |
1 | 0 | 0 |
meth |
GlmAsrEncoderRotaryEmbedding.init |
2 | 1 | 0 |
attr |
GlmAsrEncoderRotaryEmbedding.dim |
1 | 0 | 0 |
attr |
GlmAsrEncoderRotaryEmbedding.head_dim |
1 | 0 | 0 |
attr |
GlmAsrEncoderRotaryEmbedding.attention_scaling |
1 | 0 | 0 |
meth |
GlmAsrMultiModalProjector.init |
4 | 3 | 0 |
attr |
GlmAsrMultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
GlmAsrMultiModalProjector.act |
1 | 0 | 0 |
attr |
GlmAsrMultiModalProjector.linear_2 |
1 | 0 | 0 |
meth |
GlmAsrForConditionalGeneration.init |
3 | 2 | 0 |
attr |
GlmAsrForConditionalGeneration.config |
1 | 0 | 0 |
attr |
GlmAsrForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
GlmAsrForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
GlmAsrForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
GlmAsrForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
GlmAsrForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
GlmAsrForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
GlmAsrProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
GlmAsrEncoder.init |
4 | 2 | 0 |
attr |
GlmAsrEncoder.config |
1 | 0 | 0 |
attr |
GlmAsrEncoder.conv1 |
1 | 0 | 0 |
attr |
GlmAsrEncoder.conv2 |
1 | 0 | 0 |
attr |
GlmAsrEncoder.layers |
1 | 0 | 0 |
attr |
GlmAsrEncoder.norm |
1 | 0 | 0 |
attr |
GlmAsrEncoder.rotary_emb |
1 | 0 | 0 |
vllm.model_executor.models.gpt2 (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPT2LMHeadModel.init |
3 | 2 | 0 |
attr |
GPT2LMHeadModel.config |
1 | 0 | 0 |
attr |
GPT2LMHeadModel.quant_config |
1 | 0 | 0 |
attr |
GPT2LMHeadModel.transformer |
1 | 0 | 0 |
attr |
GPT2LMHeadModel.lm_head |
1 | 0 | 0 |
attr |
GPT2LMHeadModel.logits_processor |
1 | 0 | 0 |
attr |
GPT2LMHeadModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
GPT2Block.init |
5 | 4 | 0 |
attr |
GPT2Block.ln_1 |
1 | 0 | 0 |
attr |
GPT2Block.attn |
1 | 0 | 0 |
attr |
GPT2Block.ln_2 |
1 | 0 | 0 |
attr |
GPT2Block.mlp |
1 | 0 | 0 |
meth |
GPT2ForSequenceClassification.init |
3 | 2 | 0 |
meth |
GPT2ForSequenceClassification.load_weights |
2 | 1 | 0 |
attr |
GPT2ForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
GPT2ForSequenceClassification.score |
1 | 0 | 0 |
attr |
GPT2ForSequenceClassification.pooler |
1 | 0 | 0 |
meth |
GPT2MLP.init |
5 | 4 | 0 |
attr |
GPT2MLP.c_fc |
1 | 0 | 0 |
attr |
GPT2MLP.c_proj |
1 | 0 | 0 |
attr |
GPT2MLP.act |
1 | 0 | 0 |
meth |
GPT2Attention.init |
5 | 4 | 0 |
attr |
GPT2Attention.hidden_size |
1 | 0 | 0 |
attr |
GPT2Attention.num_heads |
1 | 0 | 0 |
attr |
GPT2Attention.head_dim |
1 | 0 | 0 |
attr |
GPT2Attention.scale |
1 | 0 | 0 |
attr |
GPT2Attention.c_attn |
1 | 0 | 0 |
attr |
GPT2Attention.c_proj |
1 | 0 | 0 |
attr |
GPT2Attention.attn |
1 | 0 | 0 |
meth |
GPT2Model.init |
3 | 2 | 0 |
attr |
GPT2Model.config |
1 | 0 | 0 |
attr |
GPT2Model.embed_dim |
1 | 0 | 0 |
attr |
GPT2Model.wte |
1 | 0 | 0 |
attr |
GPT2Model.wpe |
1 | 0 | 0 |
attr |
GPT2Model.ln_f |
1 | 0 | 0 |
attr |
GPT2Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.gpt_bigcode (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTBigMLP.init |
5 | 4 | 0 |
attr |
GPTBigMLP.c_fc |
1 | 0 | 0 |
attr |
GPTBigMLP.c_proj |
1 | 0 | 0 |
attr |
GPTBigMLP.act |
1 | 0 | 0 |
meth |
GPTBigCodeForCausalLM.init |
3 | 2 | 0 |
attr |
GPTBigCodeForCausalLM.config |
1 | 0 | 0 |
attr |
GPTBigCodeForCausalLM.quant_config |
1 | 0 | 0 |
attr |
GPTBigCodeForCausalLM.transformer |
1 | 0 | 0 |
attr |
GPTBigCodeForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
GPTBigCodeForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
GPTBigCodeForCausalLM.lm_head |
1 | 0 | 0 |
meth |
GPTBigCodeBlock.init |
5 | 4 | 0 |
attr |
GPTBigCodeBlock.ln_1 |
1 | 0 | 0 |
attr |
GPTBigCodeBlock.attn |
1 | 0 | 0 |
attr |
GPTBigCodeBlock.ln_2 |
1 | 0 | 0 |
attr |
GPTBigCodeBlock.mlp |
1 | 0 | 0 |
meth |
GPTBigCodeModel.init |
3 | 2 | 0 |
attr |
GPTBigCodeModel.config |
1 | 0 | 0 |
attr |
GPTBigCodeModel.embed_dim |
1 | 0 | 0 |
attr |
GPTBigCodeModel.vocab_size |
1 | 0 | 0 |
attr |
GPTBigCodeModel.wte |
1 | 0 | 0 |
attr |
GPTBigCodeModel.wpe |
1 | 0 | 0 |
attr |
GPTBigCodeModel.ln_f |
1 | 0 | 0 |
attr |
GPTBigCodeModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
GPTBigCodeAttention.init |
5 | 4 | 0 |
attr |
GPTBigCodeAttention.hidden_size |
1 | 0 | 0 |
attr |
GPTBigCodeAttention.tensor_model_parallel_world_size |
1 | 0 | 0 |
attr |
GPTBigCodeAttention.num_heads |
1 | 0 | 0 |
attr |
GPTBigCodeAttention.head_dim |
1 | 0 | 0 |
attr |
GPTBigCodeAttention.scale |
1 | 0 | 0 |
attr |
GPTBigCodeAttention.multi_query |
1 | 0 | 0 |
attr |
GPTBigCodeAttention.kv_dim |
1 | 0 | 0 |
attr |
GPTBigCodeAttention.c_attn |
1 | 0 | 0 |
attr |
GPTBigCodeAttention.c_proj |
1 | 0 | 0 |
attr |
GPTBigCodeAttention.attn |
1 | 0 | 0 |
attr |
GPTBigCodeAttention.num_kv_heads |
1 | 0 | 0 |
vllm.model_executor.models.gpt_j (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTJForCausalLM.init |
3 | 2 | 0 |
attr |
GPTJForCausalLM.config |
1 | 0 | 0 |
attr |
GPTJForCausalLM.quant_config |
1 | 0 | 0 |
attr |
GPTJForCausalLM.transformer |
1 | 0 | 0 |
attr |
GPTJForCausalLM.lm_head |
1 | 0 | 0 |
attr |
GPTJForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
GPTJForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
GPTJMLP.init |
5 | 4 | 0 |
attr |
GPTJMLP.fc_in |
1 | 0 | 0 |
attr |
GPTJMLP.fc_out |
1 | 0 | 0 |
attr |
GPTJMLP.act |
1 | 0 | 0 |
meth |
GPTJModel.init |
3 | 2 | 0 |
attr |
GPTJModel.config |
1 | 0 | 0 |
attr |
GPTJModel.quant_config |
1 | 0 | 0 |
attr |
GPTJModel.embed_dim |
1 | 0 | 0 |
attr |
GPTJModel.wte |
1 | 0 | 0 |
attr |
GPTJModel.ln_f |
1 | 0 | 0 |
attr |
GPTJModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
GPTJBlock.init |
5 | 4 | 0 |
attr |
GPTJBlock.ln_1 |
1 | 0 | 0 |
attr |
GPTJBlock.attn |
1 | 0 | 0 |
attr |
GPTJBlock.mlp |
1 | 0 | 0 |
meth |
GPTJAttention.init |
5 | 4 | 0 |
attr |
GPTJAttention.total_num_heads |
1 | 0 | 0 |
attr |
GPTJAttention.hidden_size |
1 | 0 | 0 |
attr |
GPTJAttention.head_size |
1 | 0 | 0 |
attr |
GPTJAttention.qkv_proj |
1 | 0 | 0 |
attr |
GPTJAttention.out_proj |
1 | 0 | 0 |
attr |
GPTJAttention.num_heads |
1 | 0 | 0 |
attr |
GPTJAttention.rotary_emb |
1 | 0 | 0 |
attr |
GPTJAttention.attn |
1 | 0 | 0 |
vllm.model_executor.models.gpt_neox (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTNeoXAttention.init |
5 | 4 | 0 |
attr |
GPTNeoXAttention.total_num_heads |
1 | 0 | 0 |
attr |
GPTNeoXAttention.hidden_size |
1 | 0 | 0 |
attr |
GPTNeoXAttention.head_size |
1 | 0 | 0 |
attr |
GPTNeoXAttention.bias |
1 | 0 | 0 |
attr |
GPTNeoXAttention.num_heads |
1 | 0 | 0 |
attr |
GPTNeoXAttention.query_key_value |
1 | 0 | 0 |
attr |
GPTNeoXAttention.dense |
1 | 0 | 0 |
attr |
GPTNeoXAttention.rotary_emb |
1 | 0 | 0 |
attr |
GPTNeoXAttention.attn |
1 | 0 | 0 |
meth |
GPTNeoXMLP.init |
4 | 3 | 0 |
meth |
GPTNeoXMLP.forward |
2 | 0 | 0 |
attr |
GPTNeoXMLP.dense_h_to_4h |
1 | 0 | 0 |
attr |
GPTNeoXMLP.dense_4h_to_h |
1 | 0 | 0 |
attr |
GPTNeoXMLP.act |
1 | 0 | 0 |
meth |
GPTNeoXModel.init |
3 | 2 | 0 |
attr |
GPTNeoXModel.config |
1 | 0 | 0 |
attr |
GPTNeoXModel.embed_in |
1 | 0 | 0 |
attr |
GPTNeoXModel.final_layer_norm |
1 | 0 | 0 |
attr |
GPTNeoXModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
GPTNeoXLayer.init |
5 | 4 | 0 |
attr |
GPTNeoXLayer.use_parallel_residual |
1 | 0 | 0 |
attr |
GPTNeoXLayer.input_layernorm |
1 | 0 | 0 |
attr |
GPTNeoXLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
GPTNeoXLayer.attention |
1 | 0 | 0 |
attr |
GPTNeoXLayer.mlp |
1 | 0 | 0 |
meth |
GPTNeoXForCausalLM.init |
3 | 2 | 0 |
attr |
GPTNeoXForCausalLM.config |
1 | 0 | 0 |
attr |
GPTNeoXForCausalLM.quant_config |
1 | 0 | 0 |
attr |
GPTNeoXForCausalLM.gpt_neox |
1 | 0 | 0 |
attr |
GPTNeoXForCausalLM.embed_out |
1 | 0 | 0 |
attr |
GPTNeoXForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
GPTNeoXForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.gpt_oss (47 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TransformerBlock.init |
4 | 3 | 0 |
attr |
TransformerBlock.layer_idx |
1 | 0 | 0 |
attr |
TransformerBlock.attn |
1 | 0 | 0 |
attr |
TransformerBlock.mlp |
1 | 0 | 0 |
attr |
TransformerBlock.input_layernorm |
1 | 0 | 0 |
attr |
TransformerBlock.post_attention_layernorm |
1 | 0 | 0 |
meth |
OAIAttention.init |
5 | 4 | 0 |
attr |
OAIAttention.layer_idx |
1 | 0 | 0 |
attr |
OAIAttention.head_dim |
1 | 0 | 0 |
attr |
OAIAttention.num_attention_heads |
1 | 0 | 0 |
attr |
OAIAttention.num_key_value_heads |
1 | 0 | 0 |
attr |
OAIAttention.hidden_size |
1 | 0 | 0 |
attr |
OAIAttention.rotary_emb |
1 | 0 | 0 |
attr |
OAIAttention.sinks |
1 | 0 | 0 |
attr |
OAIAttention.q_size |
1 | 0 | 0 |
attr |
OAIAttention.kv_size |
1 | 0 | 0 |
attr |
OAIAttention.scaling |
1 | 0 | 0 |
attr |
OAIAttention.qkv_proj |
1 | 0 | 0 |
attr |
OAIAttention.o_proj |
1 | 0 | 0 |
attr |
OAIAttention.num_local_attention_heads |
1 | 0 | 0 |
attr |
OAIAttention.num_local_key_value_heads |
1 | 0 | 0 |
attr |
OAIAttention.attn |
1 | 0 | 0 |
meth |
MLPBlock.init |
4 | 3 | 0 |
attr |
MLPBlock.is_sequence_parallel |
1 | 0 | 0 |
attr |
MLPBlock.layer_idx |
1 | 0 | 0 |
attr |
MLPBlock.num_experts |
1 | 0 | 0 |
attr |
MLPBlock.hidden_size |
1 | 0 | 0 |
attr |
MLPBlock.experts_per_token |
1 | 0 | 0 |
attr |
MLPBlock.world_size |
1 | 0 | 0 |
attr |
MLPBlock.router |
1 | 0 | 0 |
attr |
MLPBlock.experts |
1 | 0 | 0 |
meth |
GptOssModel.init |
3 | 2 | 0 |
attr |
GptOssModel.config |
1 | 0 | 0 |
attr |
GptOssModel.quant_config |
1 | 0 | 0 |
attr |
GptOssModel.parallel_config |
1 | 0 | 0 |
attr |
GptOssModel.embedding |
1 | 0 | 0 |
attr |
GptOssModel.norm |
1 | 0 | 0 |
attr |
GptOssModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
GptOssModel.aux_hidden_state_layers |
1 | 0 | 0 |
meth |
GptOssForCausalLM.init |
3 | 2 | 0 |
attr |
GptOssForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
GptOssForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
GptOssForCausalLM.config |
1 | 0 | 0 |
attr |
GptOssForCausalLM.model |
1 | 0 | 0 |
attr |
GptOssForCausalLM.lm_head |
1 | 0 | 0 |
attr |
GptOssForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
GptOssForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.granite (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
GraniteAttention.hidden_size |
1 | 0 | 0 |
attr |
GraniteAttention.total_num_heads |
1 | 0 | 0 |
attr |
GraniteAttention.num_heads |
1 | 0 | 0 |
attr |
GraniteAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
GraniteAttention.num_kv_heads |
1 | 0 | 0 |
attr |
GraniteAttention.head_dim |
1 | 0 | 0 |
attr |
GraniteAttention.q_size |
1 | 0 | 0 |
attr |
GraniteAttention.kv_size |
1 | 0 | 0 |
attr |
GraniteAttention.scaling |
1 | 0 | 0 |
attr |
GraniteAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
GraniteAttention.qkv_proj |
1 | 0 | 0 |
attr |
GraniteAttention.o_proj |
1 | 0 | 0 |
attr |
GraniteAttention.rotary_emb |
1 | 0 | 0 |
attr |
GraniteAttention.attn |
1 | 0 | 0 |
attr |
GraniteDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
GraniteDecoderLayer.residual_multiplier |
1 | 0 | 0 |
attr |
GraniteDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
GraniteDecoderLayer.mlp |
1 | 0 | 0 |
attr |
GraniteDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
GraniteDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
GraniteMLP.forward |
2 | 0 | 0 |
attr |
GraniteMLP.gate_up_proj |
1 | 0 | 0 |
attr |
GraniteMLP.down_proj |
1 | 0 | 0 |
attr |
GraniteMLP.act_fn |
1 | 0 | 0 |
meth |
GraniteForCausalLM.init |
3 | 2 | 0 |
attr |
GraniteForCausalLM.config |
1 | 0 | 0 |
attr |
GraniteForCausalLM.quant_config |
1 | 0 | 0 |
attr |
GraniteForCausalLM.model |
1 | 0 | 0 |
attr |
GraniteForCausalLM.lm_head |
1 | 0 | 0 |
attr |
GraniteForCausalLM.logits_processor |
1 | 0 | 0 |
meth |
GraniteModel.init |
3 | 2 | 0 |
attr |
GraniteModel.config |
1 | 0 | 0 |
attr |
GraniteModel.quant_config |
1 | 0 | 0 |
attr |
GraniteModel.embed_tokens |
1 | 0 | 0 |
attr |
GraniteModel.norm |
1 | 0 | 0 |
vllm.model_executor.models.granite_speech (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteSpeechConformerAttention.init |
3 | 2 | 0 |
attr |
GraniteSpeechConformerAttention.max_pos_emb |
1 | 0 | 0 |
attr |
GraniteSpeechConformerAttention.context_size |
1 | 0 | 0 |
attr |
GraniteSpeechConformerAttention.num_heads |
1 | 0 | 0 |
attr |
GraniteSpeechConformerAttention.dim_head |
1 | 0 | 0 |
attr |
GraniteSpeechConformerAttention.scale |
1 | 0 | 0 |
attr |
GraniteSpeechConformerAttention.pre_norm |
1 | 0 | 0 |
attr |
GraniteSpeechConformerAttention.to_q |
1 | 0 | 0 |
attr |
GraniteSpeechConformerAttention.to_kv |
1 | 0 | 0 |
attr |
GraniteSpeechConformerAttention.to_out |
1 | 0 | 0 |
attr |
GraniteSpeechConformerAttention.rel_pos_emb |
1 | 0 | 0 |
meth |
GraniteSpeechEncoderProjector.init |
5 | 4 | 0 |
attr |
GraniteSpeechEncoderProjector.hidden_size |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderProjector.downsample_rate |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderProjector.window_size |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderProjector.num_queries |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderProjector.query |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderProjector.qformer |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderProjector.linear |
1 | 0 | 0 |
meth |
GraniteSpeechMultiModalProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
GraniteSpeechMultiModalProcessingInfo.get_max_audio_tokens |
1 | 0 | 0 |
meth |
GraniteSpeechMultiModalProcessingInfo.get_max_audio_len |
1 | 0 | 0 |
meth |
GraniteSpeechConformerFeedForward.init |
4 | 3 | 0 |
attr |
GraniteSpeechConformerFeedForward.pre_norm |
1 | 0 | 0 |
attr |
GraniteSpeechConformerFeedForward.up_proj |
1 | 0 | 0 |
attr |
GraniteSpeechConformerFeedForward.silu |
1 | 0 | 0 |
attr |
GraniteSpeechConformerFeedForward.down_proj |
1 | 0 | 0 |
meth |
GraniteSpeechForConditionalGeneration.init |
3 | 2 | 0 |
attr |
GraniteSpeechForConditionalGeneration.config |
1 | 0 | 0 |
attr |
GraniteSpeechForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
GraniteSpeechForConditionalGeneration.cache_config |
1 | 0 | 0 |
attr |
GraniteSpeechForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
GraniteSpeechForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
GraniteSpeechForConditionalGeneration.encoder |
1 | 0 | 0 |
attr |
GraniteSpeechForConditionalGeneration.projector |
1 | 0 | 0 |
meth |
GraniteSpeechConformerBlock.init |
3 | 2 | 0 |
attr |
GraniteSpeechConformerBlock.ff1 |
1 | 0 | 0 |
attr |
GraniteSpeechConformerBlock.attn |
1 | 0 | 0 |
attr |
GraniteSpeechConformerBlock.conv |
1 | 0 | 0 |
attr |
GraniteSpeechConformerBlock.ff2 |
1 | 0 | 0 |
attr |
GraniteSpeechConformerBlock.post_norm |
1 | 0 | 0 |
meth |
GraniteSpeechConformerConvModule.init |
3 | 2 | 0 |
attr |
GraniteSpeechConformerConvModule.norm |
1 | 0 | 0 |
attr |
GraniteSpeechConformerConvModule.up_conv |
1 | 0 | 0 |
attr |
GraniteSpeechConformerConvModule.glu |
1 | 0 | 0 |
attr |
GraniteSpeechConformerConvModule.depth_conv |
1 | 0 | 0 |
attr |
GraniteSpeechConformerConvModule.silu |
1 | 0 | 0 |
attr |
GraniteSpeechConformerConvModule.batch_norm |
1 | 0 | 0 |
attr |
GraniteSpeechConformerConvModule.down_conv |
1 | 0 | 0 |
meth |
GraniteSpeechConformerDepthWiseConv1d.init |
5 | 4 | 0 |
attr |
GraniteSpeechConformerDepthWiseConv1d.padding |
1 | 0 | 0 |
attr |
GraniteSpeechConformerDepthWiseConv1d.conv |
1 | 0 | 0 |
meth |
GraniteSpeechCTCEncoder.init |
4 | 3 | 0 |
meth |
GraniteSpeechCTCEncoder.forward |
2 | 1 | 0 |
attr |
GraniteSpeechCTCEncoder.config |
1 | 0 | 0 |
attr |
GraniteSpeechCTCEncoder.attention_dists |
1 | 0 | 0 |
attr |
GraniteSpeechCTCEncoder.input_linear |
1 | 0 | 0 |
attr |
GraniteSpeechCTCEncoder.layers |
1 | 0 | 0 |
attr |
GraniteSpeechCTCEncoder.out |
1 | 0 | 0 |
attr |
GraniteSpeechCTCEncoder.out_mid |
1 | 0 | 0 |
attr |
GraniteSpeechCTCEncoder.softmax |
1 | 0 | 0 |
attr |
GraniteSpeechCTCEncoder.num_layers |
1 | 0 | 0 |
vllm.model_executor.models.granitemoe (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
GraniteMoeAttention.hidden_size |
1 | 0 | 0 |
attr |
GraniteMoeAttention.total_num_heads |
1 | 0 | 0 |
attr |
GraniteMoeAttention.num_heads |
1 | 0 | 0 |
attr |
GraniteMoeAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
GraniteMoeAttention.num_kv_heads |
1 | 0 | 0 |
attr |
GraniteMoeAttention.head_dim |
1 | 0 | 0 |
attr |
GraniteMoeAttention.q_size |
1 | 0 | 0 |
attr |
GraniteMoeAttention.kv_size |
1 | 0 | 0 |
attr |
GraniteMoeAttention.scaling |
1 | 0 | 0 |
attr |
GraniteMoeAttention.qkv_proj |
1 | 0 | 0 |
attr |
GraniteMoeAttention.o_proj |
1 | 0 | 0 |
attr |
GraniteMoeAttention.rotary_emb |
1 | 0 | 0 |
attr |
GraniteMoeAttention.attn |
1 | 0 | 0 |
meth |
GraniteMoeForCausalLM.init |
3 | 2 | 0 |
attr |
GraniteMoeForCausalLM.config |
1 | 0 | 0 |
attr |
GraniteMoeForCausalLM.model |
1 | 0 | 0 |
attr |
GraniteMoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
GraniteMoeForCausalLM.logits_processor |
1 | 0 | 0 |
meth |
GraniteMoeModel.init |
3 | 2 | 0 |
attr |
GraniteMoeModel.config |
1 | 0 | 0 |
attr |
GraniteMoeModel.quant_config |
1 | 0 | 0 |
attr |
GraniteMoeModel.vocab_size |
1 | 0 | 0 |
attr |
GraniteMoeModel.embed_tokens |
1 | 0 | 0 |
attr |
GraniteMoeModel.embedding_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeModel.norm |
1 | 0 | 0 |
meth |
GraniteMoeMoE.init |
10 | 8 | 0 |
attr |
GraniteMoeMoE.hidden_size |
1 | 0 | 0 |
attr |
GraniteMoeMoE.is_sequence_parallel |
1 | 0 | 0 |
attr |
GraniteMoeMoE.gate |
1 | 0 | 0 |
attr |
GraniteMoeMoE.experts |
1 | 0 | 0 |
attr |
GraniteMoeDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
GraniteMoeDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
GraniteMoeDecoderLayer.block_sparse_moe |
1 | 0 | 0 |
attr |
GraniteMoeDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
GraniteMoeDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
GraniteMoeDecoderLayer.residual_multiplier |
1 | 0 | 0 |
vllm.model_executor.models.granitemoehybrid (50 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteMoeHybridForCausalLM.init |
3 | 2 | 0 |
meth |
GraniteMoeHybridForCausalLM.forward |
6 | 4 | 0 |
attr |
GraniteMoeHybridForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.model_config |
1 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.quant_config |
1 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.config |
1 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.scheduler_config |
1 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.model |
1 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.lm_head |
1 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttention.causal |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttention.hidden_size |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttention.attention_bias |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttention.attention_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttention.total_num_heads |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttention.head_dim |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttention.num_heads |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttention.num_key_value_heads |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttention.qkv_proj |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttention.o_proj |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttention.attn |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttention.rotary_emb |
1 | 0 | 0 |
meth |
GraniteMoeHybridMambaDecoderLayer.forward |
4 | 2 | 0 |
attr |
GraniteMoeHybridMambaDecoderLayer.config |
1 | 0 | 0 |
attr |
GraniteMoeHybridMambaDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
GraniteMoeHybridMambaDecoderLayer.residual_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeHybridMambaDecoderLayer.mamba |
1 | 0 | 0 |
attr |
GraniteMoeHybridMambaDecoderLayer.block_sparse_moe |
1 | 0 | 0 |
attr |
GraniteMoeHybridMambaDecoderLayer.shared_mlp |
1 | 0 | 0 |
attr |
GraniteMoeHybridMambaDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
GraniteMoeHybridMambaDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
GraniteMoeHybridModel.init |
3 | 2 | 0 |
attr |
GraniteMoeHybridModel.config |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.quant_config |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.vocab_size |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.embed_tokens |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.embedding_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.norm |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttentionDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttentionDecoderLayer.residual_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttentionDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttentionDecoderLayer.block_sparse_moe |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttentionDecoderLayer.shared_mlp |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttentionDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
GraniteMoeHybridAttentionDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.granitemoeshared (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteMoeSharedForCausalLM.init |
3 | 2 | 0 |
attr |
GraniteMoeSharedForCausalLM.config |
1 | 0 | 0 |
attr |
GraniteMoeSharedForCausalLM.model |
1 | 0 | 0 |
attr |
GraniteMoeSharedForCausalLM.lm_head |
1 | 0 | 0 |
attr |
GraniteMoeSharedForCausalLM.logits_processor |
1 | 0 | 0 |
meth |
GraniteMoeSharedModel.init |
3 | 2 | 0 |
attr |
GraniteMoeSharedModel.config |
1 | 0 | 0 |
attr |
GraniteMoeSharedModel.quant_config |
1 | 0 | 0 |
attr |
GraniteMoeSharedModel.vocab_size |
1 | 0 | 0 |
attr |
GraniteMoeSharedModel.embed_tokens |
1 | 0 | 0 |
attr |
GraniteMoeSharedModel.embedding_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeSharedModel.norm |
1 | 0 | 0 |
meth |
GraniteMoeSharedMLP.init |
4 | 3 | 0 |
attr |
GraniteMoeSharedMLP.input_size |
1 | 0 | 0 |
attr |
GraniteMoeSharedMLP.hidden_size |
1 | 0 | 0 |
attr |
GraniteMoeSharedMLP.input_linear |
1 | 0 | 0 |
attr |
GraniteMoeSharedMLP.output_linear |
1 | 0 | 0 |
attr |
GraniteMoeSharedMLP.act_fn |
1 | 0 | 0 |
attr |
GraniteMoeSharedDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
GraniteMoeSharedDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
GraniteMoeSharedDecoderLayer.block_sparse_moe |
1 | 0 | 0 |
attr |
GraniteMoeSharedDecoderLayer.shared_mlp |
1 | 0 | 0 |
attr |
GraniteMoeSharedDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
GraniteMoeSharedDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
GraniteMoeSharedDecoderLayer.residual_multiplier |
1 | 0 | 0 |
vllm.model_executor.models.gritlm (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
GritLMMeanPool.init |
2 | 1 | 0 |
attr |
GritLMMeanPool.model_config |
1 | 0 | 0 |
attr |
GritLMMeanPool.token_ids |
1 | 0 | 0 |
attr |
GritLMMeanPool.user_pattern_ids |
1 | 0 | 0 |
attr |
GritLMMeanPool.embed_newline_pattern_ids |
1 | 0 | 0 |
attr |
GritLMMeanPool.embed_pattern_ids |
1 | 0 | 0 |
meth |
GritLMPooler.init |
2 | 1 | 0 |
meth |
GritLM.init |
4 | 3 | 0 |
attr |
GritLM.pooler |
1 | 0 | 0 |
vllm.model_executor.models.grok1 (59 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Grok1DecoderLayer.init |
5 | 4 | 0 |
attr |
Grok1DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Grok1DecoderLayer.use_fp8 |
1 | 0 | 0 |
attr |
Grok1DecoderLayer.attn |
1 | 0 | 0 |
attr |
Grok1DecoderLayer.moe_block |
1 | 0 | 0 |
attr |
Grok1DecoderLayer.residual_moe |
1 | 0 | 0 |
attr |
Grok1DecoderLayer.residual_moe_scale |
1 | 0 | 0 |
attr |
Grok1DecoderLayer.pre_attn_norm |
1 | 0 | 0 |
attr |
Grok1DecoderLayer.post_attn_norm |
1 | 0 | 0 |
attr |
Grok1DecoderLayer.pre_moe_norm |
1 | 0 | 0 |
attr |
Grok1DecoderLayer.post_moe_norm |
1 | 0 | 0 |
attr |
Grok1DecoderLayer.mlp |
1 | 0 | 0 |
meth |
Grok1Attention.init |
10 | 9 | 0 |
attr |
Grok1Attention.hidden_size |
1 | 0 | 0 |
attr |
Grok1Attention.config |
1 | 0 | 0 |
attr |
Grok1Attention.total_num_heads |
1 | 0 | 0 |
attr |
Grok1Attention.num_heads |
1 | 0 | 0 |
attr |
Grok1Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Grok1Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Grok1Attention.head_dim |
1 | 0 | 0 |
attr |
Grok1Attention.q_size |
1 | 0 | 0 |
attr |
Grok1Attention.kv_size |
1 | 0 | 0 |
attr |
Grok1Attention.scaling |
1 | 0 | 0 |
attr |
Grok1Attention.qkv_proj |
1 | 0 | 0 |
attr |
Grok1Attention.o_proj |
1 | 0 | 0 |
attr |
Grok1Attention.rotary_emb |
1 | 0 | 0 |
attr |
Grok1Attention.attn |
1 | 0 | 0 |
attr |
Grok1Attention.attn_multiplier |
1 | 0 | 0 |
meth |
GrokForCausalLM.new |
3 | 2 | 0 |
attr |
GrokForCausalLM.packed_modules_mapping |
1 | 0 | 0 |
meth |
Grok1MoE.init |
11 | 10 | 0 |
attr |
Grok1MoE.hidden_size |
1 | 0 | 0 |
attr |
Grok1MoE.gate |
1 | 0 | 0 |
attr |
Grok1MoE.experts |
1 | 0 | 0 |
attr |
Grok1MoE.router_logit_soft_cap |
1 | 0 | 0 |
attr |
Grok1MLP.gate_up_proj |
1 | 0 | 0 |
attr |
Grok1MLP.down_proj |
1 | 0 | 0 |
attr |
Grok1MLP.act_fn |
1 | 0 | 0 |
meth |
GrokBaseForCausalLM.init |
3 | 2 | 0 |
attr |
GrokBaseForCausalLM.config |
1 | 0 | 0 |
attr |
GrokBaseForCausalLM.quant_config |
1 | 0 | 0 |
attr |
GrokBaseForCausalLM.model |
1 | 0 | 0 |
attr |
GrokBaseForCausalLM.lm_head |
1 | 0 | 0 |
attr |
GrokBaseForCausalLM.output_multiplier_scale |
1 | 0 | 0 |
attr |
GrokBaseForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
GrokBaseForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
Grok1Model.init |
7 | 6 | 0 |
attr |
Grok1Model.config |
1 | 0 | 0 |
attr |
Grok1Model.quant_config |
1 | 0 | 0 |
attr |
Grok1Model.ckpt_gate_proj_name |
1 | 0 | 0 |
attr |
Grok1Model.ckpt_down_proj_name |
1 | 0 | 0 |
attr |
Grok1Model.ckpt_up_proj_name |
1 | 0 | 0 |
attr |
Grok1Model.weight_name_remapping |
1 | 0 | 0 |
attr |
Grok1Model.vocab_size |
1 | 0 | 0 |
attr |
Grok1Model.embedding_multiplier_scale |
1 | 0 | 0 |
attr |
Grok1Model.embed_tokens |
1 | 0 | 0 |
attr |
Grok1Model.norm |
1 | 0 | 0 |
attr |
Grok1Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.h2ovl (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
H2OVLChatModel._init_vision_model |
5 | 4 | 0 |
attr |
H2OVLProcessor.use_msac |
1 | 0 | 0 |
vllm.model_executor.models.hunyuan_v1 (91 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
HunYuanCrossAttention.hidden_size |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.total_num_heads |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.num_heads |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.num_kv_heads |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.q_size |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.kv_size |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.scaling |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.use_qk_norm |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.layer_id |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.q_proj |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.o_proj |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.rotary_emb |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.attn |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.head_dim |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.query_layernorm |
1 | 0 | 0 |
attr |
HunYuanCrossAttention.key_layernorm |
1 | 0 | 0 |
meth |
HunYuanSparseMoeBlock.init |
6 | 5 | 0 |
attr |
HunYuanSparseMoeBlock.tp_size |
1 | 0 | 0 |
attr |
HunYuanSparseMoeBlock.ep_group |
1 | 0 | 0 |
attr |
HunYuanSparseMoeBlock.ep_rank |
1 | 0 | 0 |
attr |
HunYuanSparseMoeBlock.ep_size |
1 | 0 | 0 |
attr |
HunYuanSparseMoeBlock.n_routed_experts |
1 | 0 | 0 |
attr |
HunYuanSparseMoeBlock.enable_eplb |
1 | 0 | 0 |
attr |
HunYuanSparseMoeBlock.n_logical_experts |
1 | 0 | 0 |
attr |
HunYuanSparseMoeBlock.n_redundant_experts |
1 | 0 | 0 |
attr |
HunYuanSparseMoeBlock.n_physical_experts |
1 | 0 | 0 |
attr |
HunYuanSparseMoeBlock.n_local_physical_experts |
1 | 0 | 0 |
attr |
HunYuanSparseMoeBlock.physical_expert_start |
1 | 0 | 0 |
attr |
HunYuanSparseMoeBlock.physical_expert_end |
1 | 0 | 0 |
attr |
HunYuanSparseMoeBlock.gate |
1 | 0 | 0 |
attr |
HunYuanSparseMoeBlock.experts |
1 | 0 | 0 |
attr |
HunYuanSparseMoeBlock.shared_mlp |
1 | 0 | 0 |
attr |
HunYuanDecoderLayer.layer_id |
1 | 0 | 0 |
attr |
HunYuanDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
HunYuanDecoderLayer.intermediate_size |
1 | 0 | 0 |
attr |
HunYuanDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
HunYuanDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
HunYuanDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
HunYuanDecoderLayer.mlp |
1 | 0 | 0 |
meth |
HunYuanModel.init |
3 | 2 | 0 |
meth |
HunYuanModel._split_qkv_weight |
2 | 1 | 0 |
meth |
HunYuanModel.load_weights |
2 | 1 | 0 |
attr |
HunYuanModel.num_redundant_experts |
1 | 0 | 0 |
attr |
HunYuanModel.config |
1 | 0 | 0 |
attr |
HunYuanModel.quant_config |
1 | 0 | 0 |
attr |
HunYuanModel.vocab_size |
1 | 0 | 0 |
attr |
HunYuanModel.aux_hidden_state_layers |
1 | 0 | 0 |
attr |
HunYuanModel.embed_tokens |
1 | 0 | 0 |
attr |
HunYuanModel.norm |
1 | 0 | 0 |
meth |
HunyuanV1ModelBase.init |
3 | 2 | 0 |
attr |
HunyuanV1ModelBase.config |
1 | 0 | 0 |
attr |
HunyuanV1ModelBase.quant_config |
1 | 0 | 0 |
attr |
HunyuanV1ModelBase.model |
1 | 0 | 0 |
attr |
HunyuanV1ModelBase.lm_head |
1 | 0 | 0 |
attr |
HunyuanV1ModelBase.logits_processor |
1 | 0 | 0 |
meth |
HunYuanMoEV1Base.init |
3 | 2 | 0 |
attr |
HunYuanMoEV1Base.expert_weights |
1 | 0 | 0 |
attr |
HunYuanMoEV1Base.num_expert_groups |
1 | 0 | 0 |
attr |
HunYuanMoEV1Base.moe_layers |
1 | 0 | 0 |
attr |
HunYuanMoEV1Base.num_moe_layers |
1 | 0 | 0 |
attr |
HunYuanMoEV1Base.num_logical_experts |
1 | 0 | 0 |
attr |
HunYuanMoEV1Base.num_physical_experts |
1 | 0 | 0 |
attr |
HunYuanMoEV1Base.num_local_physical_experts |
1 | 0 | 0 |
attr |
HunYuanMoEV1Base.num_routed_experts |
1 | 0 | 0 |
attr |
HunYuanMoEV1Base.num_redundant_experts |
1 | 0 | 0 |
attr |
HunYuanAttention.hidden_size |
1 | 0 | 0 |
attr |
HunYuanAttention.total_num_heads |
1 | 0 | 0 |
attr |
HunYuanAttention.num_heads |
1 | 0 | 0 |
attr |
HunYuanAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
HunYuanAttention.num_kv_heads |
1 | 0 | 0 |
attr |
HunYuanAttention.q_size |
1 | 0 | 0 |
attr |
HunYuanAttention.kv_size |
1 | 0 | 0 |
attr |
HunYuanAttention.scaling |
1 | 0 | 0 |
attr |
HunYuanAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
HunYuanAttention.use_qk_norm |
1 | 0 | 0 |
attr |
HunYuanAttention.layer_id |
1 | 0 | 0 |
attr |
HunYuanAttention.qkv_proj |
1 | 0 | 0 |
attr |
HunYuanAttention.o_proj |
1 | 0 | 0 |
attr |
HunYuanAttention.rotary_emb |
1 | 0 | 0 |
attr |
HunYuanAttention.attn |
1 | 0 | 0 |
attr |
HunYuanAttention.head_dim |
1 | 0 | 0 |
attr |
HunYuanAttention.query_layernorm |
1 | 0 | 0 |
attr |
HunYuanAttention.key_layernorm |
1 | 0 | 0 |
meth |
HunYuanMLP.forward |
2 | 0 | 0 |
attr |
HunYuanMLP.gate_up_proj |
1 | 0 | 0 |
attr |
HunYuanMLP.down_proj |
1 | 0 | 0 |
attr |
HunYuanMLP.act_fn |
1 | 0 | 0 |
meth |
HunYuanDenseV1Base.init |
3 | 2 | 0 |
vllm.model_executor.models.hunyuan_vision (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
HunYuanVisionAttention.tp_size |
1 | 0 | 0 |
attr |
HunYuanVisionAttention.hidden_size_per_attention_head |
1 | 0 | 0 |
attr |
HunYuanVisionAttention.num_attention_heads_per_partition |
1 | 0 | 0 |
attr |
HunYuanVisionAttention.qkv |
1 | 0 | 0 |
attr |
HunYuanVisionAttention.o_proj |
1 | 0 | 0 |
attr |
HunYuanVisionAttention.scale |
1 | 0 | 0 |
attr |
HunYuanVisionAttention.attn |
1 | 0 | 0 |
meth |
HunYuanVisionPatchEmbed.init |
2 | 1 | 0 |
attr |
HunYuanVisionPatchEmbed.config |
1 | 0 | 0 |
attr |
HunYuanVisionPatchEmbed.embed_dim |
1 | 0 | 0 |
attr |
HunYuanVisionPatchEmbed.patch_size |
1 | 0 | 0 |
attr |
HunYuanVisionPatchEmbed.num_channels |
1 | 0 | 0 |
attr |
HunYuanVisionPatchEmbed.spatial_merge_size |
1 | 0 | 0 |
attr |
HunYuanVisionPatchEmbed.interpolate_mode |
1 | 0 | 0 |
attr |
HunYuanVisionPatchEmbed.patch_embedding |
1 | 0 | 0 |
attr |
HunYuanVisionPatchEmbed.max_num_patches |
1 | 0 | 0 |
attr |
HunYuanVisionPatchEmbed.num_positions |
1 | 0 | 0 |
attr |
HunYuanVisionPatchEmbed.position_edge |
1 | 0 | 0 |
attr |
HunYuanVisionPatchEmbed.position_embedding |
1 | 0 | 0 |
attr |
HunYuanVisionPatchEmbed.patch_pos_embed |
1 | 0 | 0 |
attr |
HunYuanVisionBlock.input_layernorm |
1 | 0 | 0 |
attr |
HunYuanVisionBlock.post_attention_layernorm |
1 | 0 | 0 |
attr |
HunYuanVisionBlock.self_attn |
1 | 0 | 0 |
attr |
HunYuanVisionBlock.mlp |
1 | 0 | 0 |
meth |
HunYuanVisionPatchMerger.init |
6 | 0 | 0 |
meth |
HunYuanVisionPatchMerger.forward |
3 | 0 | 0 |
attr |
HunYuanVisionPatchMerger.spatial_merge_size |
1 | 0 | 0 |
attr |
HunYuanVisionPatchMerger.proj |
1 | 0 | 0 |
attr |
HunYuanVisionPatchMerger.mlp |
1 | 0 | 0 |
attr |
HunYuanVisionPatchMerger.image_newline |
1 | 0 | 0 |
attr |
HunYuanVisionPatchMerger.image_begin |
1 | 0 | 0 |
attr |
HunYuanVisionPatchMerger.image_end |
1 | 0 | 0 |
attr |
HunYuanVisionPatchMerger.image_sep |
1 | 0 | 0 |
attr |
HunYuanVisionPatchMerger.before_rms |
1 | 0 | 0 |
attr |
HunYuanVisionPatchMerger.after_rms |
1 | 0 | 0 |
meth |
HunYuanVLProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
HunYuanVLProcessingInfo.get_data_parser |
1 | 0 | 0 |
attr |
HunYuanVisionTransformer.hidden_size |
1 | 0 | 0 |
attr |
HunYuanVisionTransformer.num_heads |
1 | 0 | 0 |
attr |
HunYuanVisionTransformer.spatial_merge_size |
1 | 0 | 0 |
attr |
HunYuanVisionTransformer.embeddings |
1 | 0 | 0 |
attr |
HunYuanVisionTransformer.layers |
1 | 0 | 0 |
attr |
HunYuanVisionTransformer.perceive |
1 | 0 | 0 |
meth |
HunYuanVisionMLP.init |
7 | 6 | 0 |
meth |
HunYuanVisionMLP.forward |
2 | 1 | 0 |
attr |
HunYuanVisionMLP.dense_h_to_4h |
1 | 0 | 0 |
attr |
HunYuanVisionMLP.dense_4h_to_h |
1 | 0 | 0 |
attr |
HunYuanVisionMLP.act_fn |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
HunYuanVLForConditionalGeneration.init |
3 | 2 | 0 |
attr |
HunYuanVLForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
HunYuanVLForConditionalGeneration.config |
1 | 0 | 0 |
attr |
HunYuanVLForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
HunYuanVLForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
HunYuanVLForConditionalGeneration.language_model |
1 | 0 | 0 |
vllm.model_executor.models.hyperclovax_vision (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HCXVisionMlp.init |
6 | 0 | 0 |
meth |
HCXVisionMlp.forward |
2 | 0 | 0 |
attr |
HCXVisionMlp.mm_projector_type |
1 | 0 | 0 |
attr |
HCXVisionMlp.fc1 |
1 | 0 | 0 |
attr |
HCXVisionMlp.act |
1 | 0 | 0 |
attr |
HCXVisionMlp.fc2 |
1 | 0 | 0 |
meth |
HCXVisionCAbstractor.init |
8 | 7 | 0 |
meth |
HCXVisionCAbstractor.build_net |
7 | 6 | 0 |
meth |
HCXVisionCAbstractor.build_mlp |
4 | 3 | 0 |
attr |
HCXVisionCAbstractor.num_input_tokens |
1 | 0 | 0 |
attr |
HCXVisionCAbstractor.output_hidden_size |
1 | 0 | 0 |
attr |
HCXVisionCAbstractor.dtype |
1 | 0 | 0 |
attr |
HCXVisionCAbstractor.pos_emb |
1 | 0 | 0 |
attr |
HCXVisionCAbstractor.prenorm |
1 | 0 | 0 |
func |
init_vision_tower_for_hcxvision |
6 | 5 | 0 |
meth |
HCXVisionForCausalLM._prepare_multimodal_kwargs |
2 | 1 | 0 |
meth |
HCXVisionForCausalLM._init_possible_resolutions |
3 | 0 | 0 |
meth |
HCXVisionForCausalLM._init_mm_projector |
4 | 0 | 0 |
attr |
HCXVisionForCausalLM.dtype |
1 | 0 | 0 |
attr |
HCXVisionForCausalLM.config |
1 | 0 | 0 |
attr |
HCXVisionForCausalLM.vision_config |
1 | 0 | 0 |
attr |
HCXVisionForCausalLM.text_config |
1 | 0 | 0 |
attr |
HCXVisionForCausalLM.vision_model |
1 | 0 | 0 |
attr |
HCXVisionForCausalLM.mm_projector |
1 | 0 | 0 |
attr |
HCXVisionForCausalLM.language_model |
1 | 0 | 0 |
attr |
HCXVisionForCausalLM.image_newline |
1 | 0 | 0 |
meth |
HCXVisionProcessingInfo.get_vision_encoder_info |
1 | 0 | 0 |
vllm.model_executor.models.idefics2_vision_model (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Idefics2VisionAttention.config |
1 | 0 | 0 |
attr |
Idefics2VisionAttention.embed_dim |
1 | 0 | 0 |
attr |
Idefics2VisionAttention.num_heads |
1 | 0 | 0 |
attr |
Idefics2VisionAttention.head_dim |
1 | 0 | 0 |
attr |
Idefics2VisionAttention.scale |
1 | 0 | 0 |
attr |
Idefics2VisionAttention.dropout |
1 | 0 | 0 |
attr |
Idefics2VisionAttention.num_heads_per_partition |
1 | 0 | 0 |
attr |
Idefics2VisionAttention.qkv_proj |
1 | 0 | 0 |
attr |
Idefics2VisionAttention.out_proj |
1 | 0 | 0 |
attr |
Idefics2VisionAttention.attn |
1 | 0 | 0 |
attr |
Idefics2Encoder.config |
1 | 0 | 0 |
attr |
Idefics2Encoder.layers |
1 | 0 | 0 |
attr |
Idefics2EncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
Idefics2EncoderLayer.self_attn |
1 | 0 | 0 |
attr |
Idefics2EncoderLayer.layer_norm1 |
1 | 0 | 0 |
attr |
Idefics2EncoderLayer.mlp |
1 | 0 | 0 |
attr |
Idefics2EncoderLayer.layer_norm2 |
1 | 0 | 0 |
attr |
Idefics2VisionMLP.config |
1 | 0 | 0 |
attr |
Idefics2VisionMLP.activation_fn |
1 | 0 | 0 |
attr |
Idefics2VisionMLP.fc1 |
1 | 0 | 0 |
attr |
Idefics2VisionMLP.fc2 |
1 | 0 | 0 |
meth |
Idefics2VisionEmbeddings.init |
2 | 1 | 0 |
attr |
Idefics2VisionEmbeddings.embed_dim |
1 | 0 | 0 |
attr |
Idefics2VisionEmbeddings.image_size |
1 | 0 | 0 |
attr |
Idefics2VisionEmbeddings.patch_size |
1 | 0 | 0 |
attr |
Idefics2VisionEmbeddings.patch_embedding |
1 | 0 | 0 |
attr |
Idefics2VisionEmbeddings.num_patches_per_side |
1 | 0 | 0 |
attr |
Idefics2VisionEmbeddings.num_patches |
1 | 0 | 0 |
attr |
Idefics2VisionEmbeddings.num_positions |
1 | 0 | 0 |
attr |
Idefics2VisionEmbeddings.position_embedding |
1 | 0 | 0 |
meth |
Idefics2VisionTransformer.get_input_embeddings |
1 | 0 | 0 |
meth |
Idefics2VisionTransformer.forward |
4 | 3 | 0 |
attr |
Idefics2VisionTransformer.config |
1 | 0 | 0 |
attr |
Idefics2VisionTransformer.use_data_parallel |
1 | 0 | 0 |
attr |
Idefics2VisionTransformer.embeddings |
1 | 0 | 0 |
attr |
Idefics2VisionTransformer.encoder |
1 | 0 | 0 |
attr |
Idefics2VisionTransformer.require_post_norm |
1 | 0 | 0 |
attr |
Idefics2VisionTransformer.post_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.idefics3 (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Idefics3Model.init |
3 | 2 | 0 |
attr |
Idefics3Model.config |
1 | 0 | 0 |
attr |
Idefics3Model.vocab_size |
1 | 0 | 0 |
attr |
Idefics3Model.vision_model |
1 | 0 | 0 |
attr |
Idefics3Model.connector |
1 | 0 | 0 |
attr |
Idefics3Model.text_model |
1 | 0 | 0 |
attr |
Idefics3Model.image_seq_len |
1 | 0 | 0 |
attr |
Idefics3Model.image_token_id |
1 | 0 | 0 |
meth |
Idefics3SimpleMLP.init |
4 | 3 | 0 |
attr |
Idefics3SimpleMLP.proj |
1 | 0 | 0 |
meth |
Idefics3Connector.init |
4 | 3 | 0 |
attr |
Idefics3Connector.scale_factor |
1 | 0 | 0 |
attr |
Idefics3Connector.modality_projection |
1 | 0 | 0 |
meth |
Idefics3ForConditionalGeneration.init |
3 | 2 | 0 |
attr |
Idefics3ForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Idefics3ForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Idefics3ForConditionalGeneration.image_token_id |
1 | 0 | 0 |
attr |
Idefics3ForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
Idefics3ForConditionalGeneration.logits_processor |
1 | 0 | 0 |
attr |
Idefics3ForConditionalGeneration.model |
1 | 0 | 0 |
vllm.model_executor.models.interfaces (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SupportsQuant.new |
3 | 1 | 0 |
meth |
SupportsQuant._find_quant_config |
3 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.interfaces_base (3 missing, 6 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_default_tok_pooling_type |
2 | 2 | 1 |
func |
default_pooling_type |
3 | 2 | 2 |
func |
get_default_seq_pooling_type |
2 | 2 | 1 |
func |
get_attn_type |
2 | 2 | 1 |
attr |
logger |
1 | 0 | 0 |
func |
attn_type |
2 | 1 | 1 |
vllm.model_executor.models.intern_vit (57 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InternVisionModel.get_input_embeddings |
1 | 0 | 0 |
attr |
InternVisionModel.config |
1 | 0 | 0 |
attr |
InternVisionModel.use_data_parallel |
1 | 0 | 0 |
attr |
InternVisionModel.embeddings |
1 | 0 | 0 |
attr |
InternVisionModel.encoder |
1 | 0 | 0 |
attr |
InternMLP.config |
1 | 0 | 0 |
attr |
InternMLP.activation_fn |
1 | 0 | 0 |
attr |
InternMLP.fc1 |
1 | 0 | 0 |
attr |
InternMLP.fc2 |
1 | 0 | 0 |
meth |
InternVisionEncoder.init |
7 | 6 | 0 |
meth |
InternVisionEncoder.forward |
2 | 1 | 0 |
attr |
InternVisionEncoder.config |
1 | 0 | 0 |
attr |
InternVisionEncoder.layer_cls |
1 | 0 | 0 |
attr |
InternVisionEncoder.layers |
1 | 0 | 0 |
meth |
InternParallelAttention._apply_qk_norm |
3 | 2 | 0 |
attr |
InternParallelAttention.config |
1 | 0 | 0 |
attr |
InternParallelAttention.embed_dim |
1 | 0 | 0 |
attr |
InternParallelAttention.num_heads |
1 | 0 | 0 |
attr |
InternParallelAttention.head_dim |
1 | 0 | 0 |
attr |
InternParallelAttention.tp_size |
1 | 0 | 0 |
attr |
InternParallelAttention.tp_rank |
1 | 0 | 0 |
attr |
InternParallelAttention.dummy_dim |
1 | 0 | 0 |
attr |
InternParallelAttention.num_heads_per_partition |
1 | 0 | 0 |
attr |
InternParallelAttention.scale |
1 | 0 | 0 |
attr |
InternParallelAttention.qkv |
1 | 0 | 0 |
attr |
InternParallelAttention.qk_normalization |
1 | 0 | 0 |
attr |
InternParallelAttention.proj |
1 | 0 | 0 |
attr |
InternParallelAttention.attn |
1 | 0 | 0 |
attr |
InternParallelAttention.q_norm |
1 | 0 | 0 |
attr |
InternParallelAttention.k_norm |
1 | 0 | 0 |
meth |
InternVisionEmbeddings.init |
2 | 1 | 0 |
meth |
InternVisionEmbeddings._get_pos_embed |
4 | 3 | 0 |
attr |
InternVisionEmbeddings.config |
1 | 0 | 0 |
attr |
InternVisionEmbeddings.embed_dim |
1 | 0 | 0 |
attr |
InternVisionEmbeddings.image_size |
1 | 0 | 0 |
attr |
InternVisionEmbeddings.patch_size |
1 | 0 | 0 |
attr |
InternVisionEmbeddings.class_embedding |
1 | 0 | 0 |
attr |
InternVisionEmbeddings.patch_embedding |
1 | 0 | 0 |
attr |
InternVisionEmbeddings.num_patches |
1 | 0 | 0 |
attr |
InternVisionEmbeddings.num_positions |
1 | 0 | 0 |
attr |
InternVisionEmbeddings.position_embedding |
1 | 0 | 0 |
meth |
InternVisionPatchModel.init |
2 | 1 | 0 |
meth |
InternVisionPatchModel.get_input_embeddings |
1 | 0 | 0 |
attr |
InternVisionPatchModel.config |
1 | 0 | 0 |
attr |
InternVisionPatchModel.embeddings |
1 | 0 | 0 |
meth |
InternVisionEncoderLayer._init_attn |
5 | 4 | 0 |
meth |
InternVisionEncoderLayer.forward |
2 | 1 | 0 |
attr |
InternVisionEncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
InternVisionEncoderLayer.intermediate_size |
1 | 0 | 0 |
attr |
InternVisionEncoderLayer.norm_type |
1 | 0 | 0 |
attr |
InternVisionEncoderLayer.attn_cls |
1 | 0 | 0 |
attr |
InternVisionEncoderLayer.attn |
1 | 0 | 0 |
attr |
InternVisionEncoderLayer.mlp |
1 | 0 | 0 |
attr |
InternVisionEncoderLayer.norm1 |
1 | 0 | 0 |
attr |
InternVisionEncoderLayer.norm2 |
1 | 0 | 0 |
attr |
InternVisionEncoderLayer.ls1 |
1 | 0 | 0 |
attr |
InternVisionEncoderLayer.ls2 |
1 | 0 | 0 |
vllm.model_executor.models.internlm2 (45 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InternLM2Attention.split_qkv |
2 | 1 | 0 |
attr |
InternLM2Attention.hidden_size |
1 | 0 | 0 |
attr |
InternLM2Attention.tp_size |
1 | 0 | 0 |
attr |
InternLM2Attention.tp_rank |
1 | 0 | 0 |
attr |
InternLM2Attention.total_num_heads |
1 | 0 | 0 |
attr |
InternLM2Attention.num_heads |
1 | 0 | 0 |
attr |
InternLM2Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
InternLM2Attention.num_kv_heads |
1 | 0 | 0 |
attr |
InternLM2Attention.head_dim |
1 | 0 | 0 |
attr |
InternLM2Attention.q_size |
1 | 0 | 0 |
attr |
InternLM2Attention.kv_size |
1 | 0 | 0 |
attr |
InternLM2Attention.key_value_groups |
1 | 0 | 0 |
attr |
InternLM2Attention.scaling |
1 | 0 | 0 |
attr |
InternLM2Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
InternLM2Attention.wqkv |
1 | 0 | 0 |
attr |
InternLM2Attention.wo |
1 | 0 | 0 |
attr |
InternLM2Attention.rotary_emb |
1 | 0 | 0 |
attr |
InternLM2Attention.attn |
1 | 0 | 0 |
meth |
InternLM2Model.init |
4 | 3 | 0 |
attr |
InternLM2Model.config |
1 | 0 | 0 |
attr |
InternLM2Model.vocab_size |
1 | 0 | 0 |
attr |
InternLM2Model.tok_embeddings |
1 | 0 | 0 |
attr |
InternLM2Model.norm |
1 | 0 | 0 |
attr |
InternLM2Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
InternLM2MLP.forward |
2 | 0 | 0 |
attr |
InternLM2MLP.gate_up_proj |
1 | 0 | 0 |
attr |
InternLM2MLP.w2 |
1 | 0 | 0 |
attr |
InternLM2MLP.act_fn |
1 | 0 | 0 |
attr |
InternLMDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
InternLMDecoderLayer.attention |
1 | 0 | 0 |
attr |
InternLMDecoderLayer.feed_forward |
1 | 0 | 0 |
attr |
InternLMDecoderLayer.attention_norm |
1 | 0 | 0 |
attr |
InternLMDecoderLayer.ffn_norm |
1 | 0 | 0 |
meth |
InternLM2ForRewardModel.init |
4 | 3 | 0 |
attr |
InternLM2ForRewardModel.head_dtype |
1 | 0 | 0 |
attr |
InternLM2ForRewardModel.v_head |
1 | 0 | 0 |
attr |
InternLM2ForRewardModel.pooler |
1 | 0 | 0 |
meth |
InternLM2ForCausalLM.init |
4 | 3 | 0 |
attr |
InternLM2ForCausalLM.config |
1 | 0 | 0 |
attr |
InternLM2ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
InternLM2ForCausalLM.model |
1 | 0 | 0 |
attr |
InternLM2ForCausalLM.output |
1 | 0 | 0 |
attr |
InternLM2ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
InternLM2ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.internlm2_ve (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InternLM2VEForCausalLM.init |
3 | 2 | 0 |
meth |
InternLM2VEModel.init |
3 | 2 | 0 |
attr |
InternLM2VEDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
InternLM2VEDecoderLayer.attention |
1 | 0 | 0 |
attr |
InternLM2VEDecoderLayer.feed_forward |
1 | 0 | 0 |
attr |
InternLM2VEDecoderLayer.feed_forward_ve |
1 | 0 | 0 |
attr |
InternLM2VEDecoderLayer.attention_norm |
1 | 0 | 0 |
attr |
InternLM2VEDecoderLayer.ffn_norm |
1 | 0 | 0 |
vllm.model_executor.models.interns1 (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InternS1ForConditionalGeneration._init_vision_model |
4 | 3 | 0 |
meth |
InternS1ForConditionalGeneration.pixel_shuffle |
3 | 0 | 0 |
attr |
InternS1ForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
InternS1ForConditionalGeneration.config |
1 | 0 | 0 |
attr |
InternS1ForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
InternS1ForConditionalGeneration.patch_size |
1 | 0 | 0 |
attr |
InternS1ForConditionalGeneration.num_image_token |
1 | 0 | 0 |
attr |
InternS1ForConditionalGeneration.downsample_ratio |
1 | 0 | 0 |
attr |
InternS1ForConditionalGeneration.img_context_token_id |
1 | 0 | 0 |
attr |
InternS1ForConditionalGeneration.video_context_token_id |
1 | 0 | 0 |
attr |
InternS1ForConditionalGeneration.visual_token_mask |
1 | 0 | 0 |
attr |
InternS1ForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
InternS1ForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
InternS1ForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
InternS1ForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
InternS1ProcessingInfo.resolve_target_ratios |
2 | 1 | 0 |
meth |
InternS1MultiModalProjector.init |
2 | 0 | 0 |
meth |
InternS1MultiModalProjector.forward |
2 | 0 | 0 |
attr |
InternS1MultiModalProjector.layer_norm |
1 | 0 | 0 |
attr |
InternS1MultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
InternS1MultiModalProjector.act |
1 | 0 | 0 |
attr |
InternS1MultiModalProjector.linear_2 |
1 | 0 | 0 |
vllm.model_executor.models.interns1_pro (72 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InternS1ProMoeSparseMoeBlock.init |
3 | 2 | 0 |
meth |
InternS1ProMoeSparseMoeBlock.get_group_offsets |
4 | 3 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.tp_size |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.ep_group |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.ep_rank |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.ep_size |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.n_routed_experts |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.is_sequence_parallel |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.enable_eplb |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.n_logical_experts |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.n_redundant_experts |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.n_physical_experts |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.n_local_physical_experts |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.physical_expert_start |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.physical_expert_end |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.n_groups |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.experts |
1 | 0 | 0 |
attr |
InternS1ProMoeSparseMoeBlock.gate |
1 | 0 | 0 |
meth |
InternS1ProMoeMixtureOfExperts.set_moe_parameters |
1 | 0 | 0 |
meth |
InternS1ProMoeLLMForCausalLM.init |
3 | 2 | 0 |
attr |
InternS1ProMoeLLMForCausalLM.config |
1 | 0 | 0 |
attr |
InternS1ProMoeLLMForCausalLM.quant_config |
1 | 0 | 0 |
attr |
InternS1ProMoeLLMForCausalLM.model |
1 | 0 | 0 |
attr |
InternS1ProMoeLLMForCausalLM.lm_head |
1 | 0 | 0 |
attr |
InternS1ProMoeLLMForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
InternS1ProMoeLLMForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
InternS1ProProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
InternS1ProForConditionalGeneration.init |
3 | 2 | 0 |
meth |
InternS1ProForConditionalGeneration.load_weights |
2 | 1 | 0 |
attr |
InternS1ProForConditionalGeneration.packed_modules_mapping |
1 | 0 | 0 |
attr |
InternS1ProForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
InternS1ProForConditionalGeneration.config |
1 | 0 | 0 |
attr |
InternS1ProForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
InternS1ProForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
InternS1ProForConditionalGeneration.video_pruning_rate |
1 | 0 | 0 |
attr |
InternS1ProForConditionalGeneration.is_multimodal_pruning_enabled |
1 | 0 | 0 |
attr |
InternS1ProForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
InternS1ProForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
InternS1ProForConditionalGeneration.use_deepstack |
1 | 0 | 0 |
attr |
InternS1ProForConditionalGeneration.deepstack_num_level |
1 | 0 | 0 |
attr |
InternS1ProForConditionalGeneration.visual_dim |
1 | 0 | 0 |
attr |
InternS1ProForConditionalGeneration.multiscale_dim |
1 | 0 | 0 |
attr |
InternS1ProForConditionalGeneration.visual |
1 | 0 | 0 |
meth |
InternS1ProMoeMLP.forward |
2 | 0 | 0 |
attr |
InternS1ProMoeMLP.gate_up_proj |
1 | 0 | 0 |
attr |
InternS1ProMoeMLP.down_proj |
1 | 0 | 0 |
attr |
InternS1ProMoeMLP.act_fn |
1 | 0 | 0 |
meth |
InternS1ProMoeLLMModel.init |
4 | 3 | 0 |
attr |
InternS1ProMoeAttention.hidden_size |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.total_num_heads |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.num_heads |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.num_kv_heads |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.head_dim |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.q_size |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.kv_size |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.scaling |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.dual_chunk_attention_config |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.qkv_proj |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.o_proj |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.rotary_emb |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.attn |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.q_norm |
1 | 0 | 0 |
attr |
InternS1ProMoeAttention.k_norm |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
InternS1ProMoeDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
InternS1ProMoeDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
InternS1ProMoeDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
InternS1ProMoeDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
InternS1ProMoeDecoderLayer.mlp |
1 | 0 | 0 |
vllm.model_executor.models.interns1_vit (51 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InternS1VisionEncoder.init |
6 | 5 | 0 |
meth |
InternS1VisionEncoder.forward |
2 | 1 | 0 |
attr |
InternS1VisionEncoder.config |
1 | 0 | 0 |
attr |
InternS1VisionEncoder.layer |
1 | 0 | 0 |
attr |
InternSdpaAttention.config |
1 | 0 | 0 |
attr |
InternSdpaAttention.embed_dim |
1 | 0 | 0 |
attr |
InternSdpaAttention.num_heads |
1 | 0 | 0 |
attr |
InternSdpaAttention.head_dim |
1 | 0 | 0 |
attr |
InternSdpaAttention.dummy_dim |
1 | 0 | 0 |
attr |
InternSdpaAttention.scale |
1 | 0 | 0 |
attr |
InternSdpaAttention.q_proj |
1 | 0 | 0 |
attr |
InternSdpaAttention.k_proj |
1 | 0 | 0 |
attr |
InternSdpaAttention.v_proj |
1 | 0 | 0 |
attr |
InternSdpaAttention.qk_normalization |
1 | 0 | 0 |
attr |
InternSdpaAttention.projection_layer |
1 | 0 | 0 |
attr |
InternSdpaAttention.attn |
1 | 0 | 0 |
attr |
InternSdpaAttention.q_norm |
1 | 0 | 0 |
attr |
InternSdpaAttention.k_norm |
1 | 0 | 0 |
meth |
InternS1VisionLayer._init_attn |
5 | 4 | 0 |
meth |
InternS1VisionLayer.forward |
2 | 1 | 0 |
attr |
InternS1VisionLayer.attention |
1 | 0 | 0 |
attr |
InternS1VisionLayer.mlp |
1 | 0 | 0 |
attr |
InternS1VisionLayer.layernorm_before |
1 | 0 | 0 |
attr |
InternS1VisionLayer.layernorm_after |
1 | 0 | 0 |
attr |
InternS1VisionLayer.lambda_1 |
1 | 0 | 0 |
attr |
InternS1VisionLayer.lambda_2 |
1 | 0 | 0 |
meth |
InternS1VisionModel.get_input_embeddings |
1 | 0 | 0 |
attr |
InternS1VisionModel.config |
1 | 0 | 0 |
attr |
InternS1VisionModel.embeddings |
1 | 0 | 0 |
attr |
InternS1VisionModel.encoder |
1 | 0 | 0 |
attr |
InternS1VisionModel.layernorm |
1 | 0 | 0 |
meth |
InternS1VisionPatchEmbeddings.init |
2 | 0 | 0 |
attr |
InternS1VisionPatchEmbeddings.image_size |
1 | 0 | 0 |
attr |
InternS1VisionPatchEmbeddings.patch_size |
1 | 0 | 0 |
attr |
InternS1VisionPatchEmbeddings.num_channels |
1 | 0 | 0 |
attr |
InternS1VisionPatchEmbeddings.num_patches |
1 | 0 | 0 |
attr |
InternS1VisionPatchEmbeddings.patch_shape |
1 | 0 | 0 |
attr |
InternS1VisionPatchEmbeddings.projection |
1 | 0 | 0 |
attr |
InternS1VisionMLP.config |
1 | 0 | 0 |
attr |
InternS1VisionMLP.activation_fn |
1 | 0 | 0 |
attr |
InternS1VisionMLP.fc1 |
1 | 0 | 0 |
attr |
InternS1VisionMLP.fc2 |
1 | 0 | 0 |
meth |
InternS1VisionEmbeddings.init |
2 | 1 | 0 |
attr |
InternS1VisionEmbeddings.config |
1 | 0 | 0 |
attr |
InternS1VisionEmbeddings.cls_token |
1 | 0 | 0 |
attr |
InternS1VisionEmbeddings.patch_embeddings |
1 | 0 | 0 |
attr |
InternS1VisionEmbeddings.patch_size |
1 | 0 | 0 |
attr |
InternS1VisionEmbeddings.image_size |
1 | 0 | 0 |
attr |
InternS1VisionEmbeddings.mask_token |
1 | 0 | 0 |
attr |
InternS1VisionEmbeddings.position_embeddings |
1 | 0 | 0 |
vllm.model_executor.models.internvl (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InternVLProcessingInfo.get_supported_mm_limits |
1 | 0 | 0 |
prop |
InternVLProcessingInfo.supports_video |
1 | 0 | 0 |
meth |
InternVLProcessor._preprocess_video |
4 | 3 | 0 |
attr |
InternVLProcessor.video_token |
1 | 0 | 0 |
meth |
BaseInternVLProcessor._make_batch_input |
2 | 1 | 0 |
attr |
BaseInternVLProcessor.config |
1 | 0 | 0 |
attr |
BaseInternVLProcessor.tokenizer |
1 | 0 | 0 |
attr |
BaseInternVLProcessor.num_image_token |
1 | 0 | 0 |
attr |
BaseInternVLProcessor.image_size |
1 | 0 | 0 |
attr |
BaseInternVLProcessor.min_dynamic_patch |
1 | 0 | 0 |
attr |
BaseInternVLProcessor.max_dynamic_patch |
1 | 0 | 0 |
attr |
BaseInternVLProcessor.dynamic_image_size |
1 | 0 | 0 |
func |
build_transform |
2 | 1 | 0 |
meth |
InternVLChatModel._patch_quant_config |
3 | 2 | 0 |
meth |
InternVLChatModel._init_vision_model |
5 | 4 | 0 |
meth |
InternVLChatModel.pixel_shuffle |
3 | 0 | 0 |
attr |
InternVLChatModel.config |
1 | 0 | 0 |
attr |
InternVLChatModel.multimodal_config |
1 | 0 | 0 |
attr |
InternVLChatModel.use_data_parallel |
1 | 0 | 0 |
attr |
InternVLChatModel.patch_size |
1 | 0 | 0 |
attr |
InternVLChatModel.patch_tokens |
1 | 0 | 0 |
attr |
InternVLChatModel.num_image_token |
1 | 0 | 0 |
attr |
InternVLChatModel.downsample_ratio |
1 | 0 | 0 |
attr |
InternVLChatModel.ps_version |
1 | 0 | 0 |
attr |
InternVLChatModel.is_mono |
1 | 0 | 0 |
attr |
InternVLChatModel.img_context_token_id |
1 | 0 | 0 |
attr |
InternVLChatModel.video_context_token_id |
1 | 0 | 0 |
attr |
InternVLChatModel.visual_token_mask |
1 | 0 | 0 |
attr |
InternVLChatModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
InternVLChatModel.vision_model |
1 | 0 | 0 |
attr |
InternVLChatModel.mlp1 |
1 | 0 | 0 |
attr |
InternVLChatModel.language_model |
1 | 0 | 0 |
vllm.model_executor.models.iquest_loopcoder (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
LoopCoderAttention.layer_idx |
1 | 0 | 0 |
attr |
LoopCoderAttention.hidden_size |
1 | 0 | 0 |
attr |
LoopCoderAttention.total_num_heads |
1 | 0 | 0 |
attr |
LoopCoderAttention.num_heads |
1 | 0 | 0 |
attr |
LoopCoderAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
LoopCoderAttention.num_kv_heads |
1 | 0 | 0 |
attr |
LoopCoderAttention.head_dim |
1 | 0 | 0 |
attr |
LoopCoderAttention.q_size |
1 | 0 | 0 |
attr |
LoopCoderAttention.kv_size |
1 | 0 | 0 |
attr |
LoopCoderAttention.scaling |
1 | 0 | 0 |
attr |
LoopCoderAttention.dual_chunk_attention_config |
1 | 0 | 0 |
attr |
LoopCoderAttention.loop_num |
1 | 0 | 0 |
attr |
LoopCoderAttention.loop_window_size |
1 | 0 | 0 |
attr |
LoopCoderAttention.qkv_proj |
1 | 0 | 0 |
attr |
LoopCoderAttention.o_proj |
1 | 0 | 0 |
attr |
LoopCoderAttention.rotary_emb |
1 | 0 | 0 |
attr |
LoopCoderAttention.attn |
1 | 0 | 0 |
meth |
IQuestLoopCoderModel.init |
4 | 3 | 0 |
attr |
IQuestLoopCoderModel.config |
1 | 0 | 0 |
attr |
IQuestLoopCoderModel.quant_config |
1 | 0 | 0 |
attr |
IQuestLoopCoderModel.vocab_size |
1 | 0 | 0 |
attr |
IQuestLoopCoderModel.embed_tokens |
1 | 0 | 0 |
attr |
IQuestLoopCoderModel.loop_num |
1 | 0 | 0 |
attr |
IQuestLoopCoderModel.window_size |
1 | 0 | 0 |
attr |
IQuestLoopCoderModel.norm |
1 | 0 | 0 |
attr |
LoopCoderDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
LoopCoderDecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
LoopCoderDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
LoopCoderDecoderLayer.mlp |
1 | 0 | 0 |
attr |
LoopCoderDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
LoopCoderDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
IQuestLoopCoderForCausalLM.init |
3 | 2 | 0 |
attr |
IQuestLoopCoderForCausalLM.config |
1 | 0 | 0 |
attr |
IQuestLoopCoderForCausalLM.quant_config |
1 | 0 | 0 |
attr |
IQuestLoopCoderForCausalLM.model |
1 | 0 | 0 |
attr |
IQuestLoopCoderForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
IQuestLoopCoderForCausalLM.lm_head |
1 | 0 | 0 |
meth |
LoopGateProjection.init |
5 | 4 | 0 |
attr |
LoopGateProjection.total_num_heads |
1 | 0 | 0 |
attr |
LoopGateProjection.head_dim |
1 | 0 | 0 |
attr |
LoopGateProjection.num_heads |
1 | 0 | 0 |
attr |
LoopGateProjection.gate_proj |
1 | 0 | 0 |
vllm.model_executor.models.isaac (62 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IsaacForConditionalGeneration.init |
3 | 2 | 0 |
attr |
IsaacForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
IsaacForConditionalGeneration.config |
1 | 0 | 0 |
attr |
IsaacForConditionalGeneration.vision_token_id |
1 | 0 | 0 |
attr |
IsaacForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
IsaacForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
IsaacForConditionalGeneration.vision_embedding |
1 | 0 | 0 |
meth |
IsaacProcessor.init |
4 | 0 | 0 |
meth |
IsaacProcessor.call |
4 | 1 | 0 |
meth |
IsaacProcessor.apply_chat_template |
5 | 4 | 1 |
attr |
IsaacProcessor.image_token |
1 | 0 | 0 |
attr |
IsaacProcessor.image_processor |
1 | 0 | 0 |
attr |
IsaacProcessor.tokenizer |
1 | 0 | 0 |
attr |
Siglip2Encoder.config |
1 | 0 | 0 |
attr |
Siglip2Encoder.layers |
1 | 0 | 0 |
attr |
Siglip2EncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
Siglip2EncoderLayer.layer_norm1 |
1 | 0 | 0 |
attr |
Siglip2EncoderLayer.self_attn |
1 | 0 | 0 |
attr |
Siglip2EncoderLayer.layer_norm2 |
1 | 0 | 0 |
attr |
Siglip2EncoderLayer.mlp |
1 | 0 | 0 |
meth |
Siglip2VariableSequenceEmbeddings.init |
2 | 1 | 0 |
meth |
Siglip2VariableSequenceEmbeddings.forward |
2 | 1 | 0 |
attr |
Siglip2VariableSequenceEmbeddings.config |
1 | 0 | 0 |
attr |
Siglip2VariableSequenceEmbeddings.embed_dim |
1 | 0 | 0 |
attr |
Siglip2VariableSequenceEmbeddings.patch_size |
1 | 0 | 0 |
attr |
Siglip2VariableSequenceEmbeddings.patch_embedding |
1 | 0 | 0 |
attr |
Siglip2VariableSequenceEmbeddings.num_patches |
1 | 0 | 0 |
attr |
Siglip2VariableSequenceEmbeddings.position_embedding_size |
1 | 0 | 0 |
attr |
Siglip2VariableSequenceEmbeddings.position_embedding |
1 | 0 | 0 |
meth |
IsaacImageProcessor.init |
2 | 0 | 0 |
attr |
IsaacImageProcessor.patch_size |
1 | 0 | 0 |
attr |
IsaacImageProcessor.pixel_shuffle_scale |
1 | 0 | 0 |
attr |
IsaacImageProcessor.vision_max_num_patches |
1 | 0 | 0 |
attr |
IsaacImageProcessor.vision_min_num_patches |
1 | 0 | 0 |
meth |
Siglip2VisionTransformer.init |
4 | 3 | 0 |
attr |
Siglip2VisionTransformer.config |
1 | 0 | 0 |
attr |
Siglip2VisionTransformer.quant_config |
1 | 0 | 0 |
attr |
Siglip2VisionTransformer.embeddings |
1 | 0 | 0 |
attr |
Siglip2VisionTransformer.pixel_shuffle_scale_factor |
1 | 0 | 0 |
attr |
Siglip2VisionTransformer.encoder |
1 | 0 | 0 |
attr |
Siglip2VisionTransformer.post_layernorm |
1 | 0 | 0 |
meth |
IsaacProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
IsaacProcessingInfo.get_tokenizer |
1 | 0 | 0 |
meth |
IsaacProcessingInfo.get_image_processor |
2 | 1 | 0 |
meth |
IsaacVisionEmbedding.init |
6 | 5 | 0 |
attr |
IsaacVisionEmbedding.transformer |
1 | 0 | 0 |
attr |
IsaacVisionEmbedding.linear_fc1 |
1 | 0 | 0 |
attr |
IsaacVisionEmbedding.act |
1 | 0 | 0 |
attr |
IsaacVisionEmbedding.linear_fc2 |
1 | 0 | 0 |
attr |
Siglip2VisionAttention.tp_size |
1 | 0 | 0 |
attr |
Siglip2VisionAttention.tp_rank |
1 | 0 | 0 |
attr |
Siglip2VisionAttention.hidden_size_per_attention_head |
1 | 0 | 0 |
attr |
Siglip2VisionAttention.num_attention_heads_per_partition |
1 | 0 | 0 |
attr |
Siglip2VisionAttention.qkv_proj |
1 | 0 | 0 |
attr |
Siglip2VisionAttention.out_proj |
1 | 0 | 0 |
attr |
Siglip2VisionAttention.attn |
1 | 0 | 0 |
vllm.model_executor.models.jais (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JAISBlock.init |
5 | 4 | 0 |
attr |
JAISBlock.ln_1 |
1 | 0 | 0 |
attr |
JAISBlock.attn |
1 | 0 | 0 |
attr |
JAISBlock.ln_2 |
1 | 0 | 0 |
attr |
JAISBlock.mlp |
1 | 0 | 0 |
meth |
JAISLMHeadModel.init |
3 | 2 | 0 |
attr |
JAISLMHeadModel.config |
1 | 0 | 0 |
attr |
JAISLMHeadModel.quant_config |
1 | 0 | 0 |
attr |
JAISLMHeadModel.transformer |
1 | 0 | 0 |
attr |
JAISLMHeadModel.logits_processor |
1 | 0 | 0 |
attr |
JAISLMHeadModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
JAISLMHeadModel.lm_head |
1 | 0 | 0 |
attr |
JAISLMHeadModel.output_logits_scale |
1 | 0 | 0 |
meth |
JAISModel.init |
3 | 2 | 0 |
attr |
JAISModel.config |
1 | 0 | 0 |
attr |
JAISModel.embed_dim |
1 | 0 | 0 |
attr |
JAISModel.wte |
1 | 0 | 0 |
attr |
JAISModel.wpe |
1 | 0 | 0 |
attr |
JAISModel.ln_f |
1 | 0 | 0 |
attr |
JAISModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
JAISModel.embeddings_scale |
1 | 0 | 0 |
meth |
JAISMLP.init |
5 | 4 | 0 |
attr |
JAISMLP.swiglu |
1 | 0 | 0 |
attr |
JAISMLP.c_fc |
1 | 0 | 0 |
attr |
JAISMLP.c_fc2 |
1 | 0 | 0 |
attr |
JAISMLP.c_proj |
1 | 0 | 0 |
attr |
JAISMLP.act |
1 | 0 | 0 |
meth |
JAISAttention.init |
5 | 4 | 0 |
attr |
JAISAttention.hidden_size |
1 | 0 | 0 |
attr |
JAISAttention.num_heads |
1 | 0 | 0 |
attr |
JAISAttention.head_dim |
1 | 0 | 0 |
attr |
JAISAttention.attn_scale_power |
1 | 0 | 0 |
attr |
JAISAttention.scale |
1 | 0 | 0 |
attr |
JAISAttention.c_attn |
1 | 0 | 0 |
attr |
JAISAttention.c_proj |
1 | 0 | 0 |
attr |
JAISAttention.attn |
1 | 0 | 0 |
vllm.model_executor.models.jais2 (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Jais2DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Jais2DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Jais2DecoderLayer.mlp |
1 | 0 | 0 |
attr |
Jais2DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Jais2DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
Jais2MLP.forward |
2 | 0 | 0 |
attr |
Jais2MLP.up_proj |
1 | 0 | 0 |
attr |
Jais2MLP.down_proj |
1 | 0 | 0 |
attr |
Jais2MLP.act_fn |
1 | 0 | 0 |
meth |
Jais2ForCausalLM.init |
3 | 2 | 0 |
meth |
Jais2ForCausalLM._init_model |
3 | 2 | 0 |
attr |
Jais2ForCausalLM.config |
1 | 0 | 0 |
attr |
Jais2ForCausalLM.model |
1 | 0 | 0 |
attr |
Jais2ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Jais2ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Jais2ForCausalLM.logits_processor |
1 | 0 | 0 |
meth |
Jais2Model.init |
4 | 3 | 0 |
attr |
Jais2Model.config |
1 | 0 | 0 |
attr |
Jais2Model.quant_config |
1 | 0 | 0 |
attr |
Jais2Model.vocab_size |
1 | 0 | 0 |
attr |
Jais2Model.org_vocab_size |
1 | 0 | 0 |
attr |
Jais2Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Jais2Model.embed_tokens |
1 | 0 | 0 |
attr |
Jais2Model.norm |
1 | 0 | 0 |
attr |
Jais2Attention.hidden_size |
1 | 0 | 0 |
attr |
Jais2Attention.total_num_heads |
1 | 0 | 0 |
attr |
Jais2Attention.num_heads |
1 | 0 | 0 |
attr |
Jais2Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Jais2Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Jais2Attention.head_dim |
1 | 0 | 0 |
attr |
Jais2Attention.q_size |
1 | 0 | 0 |
attr |
Jais2Attention.kv_size |
1 | 0 | 0 |
attr |
Jais2Attention.scaling |
1 | 0 | 0 |
attr |
Jais2Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
Jais2Attention.qkv_proj |
1 | 0 | 0 |
attr |
Jais2Attention.o_proj |
1 | 0 | 0 |
attr |
Jais2Attention.rotary_emb |
1 | 0 | 0 |
attr |
Jais2Attention.attn |
1 | 0 | 0 |
vllm.model_executor.models.jamba (60 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JambaMambaDecoderLayer.init |
9 | 8 | 0 |
meth |
JambaMambaDecoderLayer.forward |
4 | 2 | 0 |
attr |
JambaMambaDecoderLayer.config |
1 | 0 | 0 |
attr |
JambaMambaDecoderLayer.is_lora_enabled |
1 | 0 | 0 |
attr |
JambaMambaDecoderLayer.mamba |
1 | 0 | 0 |
attr |
JambaMambaDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
JambaMambaDecoderLayer.pre_ff_layernorm |
1 | 0 | 0 |
attr |
JambaMambaDecoderLayer.feed_forward |
1 | 0 | 0 |
meth |
JambaForCausalLM.init |
3 | 2 | 0 |
meth |
JambaForCausalLM.forward |
6 | 4 | 0 |
meth |
JambaForCausalLM.copy_inputs_before_cuda_graphs |
3 | 0 | 0 |
meth |
JambaForCausalLM.get_seqlen_agnostic_capture_inputs |
2 | 1 | 0 |
attr |
JambaForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
JambaForCausalLM.config |
1 | 0 | 0 |
attr |
JambaForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
JambaForCausalLM.model_config |
1 | 0 | 0 |
attr |
JambaForCausalLM.scheduler_config |
1 | 0 | 0 |
attr |
JambaForCausalLM.model |
1 | 0 | 0 |
attr |
JambaForCausalLM.lm_head |
1 | 0 | 0 |
attr |
JambaForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
JambaForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
JambaAttentionDecoderLayer.init |
8 | 7 | 0 |
meth |
JambaAttentionDecoderLayer.self_attention |
4 | 3 | 0 |
meth |
JambaAttentionDecoderLayer.forward |
5 | 3 | 0 |
attr |
JambaAttentionDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
JambaAttentionDecoderLayer.total_num_heads |
1 | 0 | 0 |
attr |
JambaAttentionDecoderLayer.num_heads |
1 | 0 | 0 |
attr |
JambaAttentionDecoderLayer.total_num_kv_heads |
1 | 0 | 0 |
attr |
JambaAttentionDecoderLayer.num_kv_heads |
1 | 0 | 0 |
attr |
JambaAttentionDecoderLayer.head_dim |
1 | 0 | 0 |
attr |
JambaAttentionDecoderLayer.q_size |
1 | 0 | 0 |
attr |
JambaAttentionDecoderLayer.kv_size |
1 | 0 | 0 |
attr |
JambaAttentionDecoderLayer.scaling |
1 | 0 | 0 |
attr |
JambaAttentionDecoderLayer.qkv_proj |
1 | 0 | 0 |
attr |
JambaAttentionDecoderLayer.o_proj |
1 | 0 | 0 |
attr |
JambaAttentionDecoderLayer.attn |
1 | 0 | 0 |
attr |
JambaAttentionDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
JambaAttentionDecoderLayer.pre_ff_layernorm |
1 | 0 | 0 |
attr |
JambaAttentionDecoderLayer.feed_forward |
1 | 0 | 0 |
meth |
JambaMoE.init |
8 | 7 | 0 |
attr |
JambaMoE.num_total_experts |
1 | 0 | 0 |
attr |
JambaMoE.top_k |
1 | 0 | 0 |
attr |
JambaMoE.hidden_size |
1 | 0 | 0 |
attr |
JambaMoE.intermediate_size |
1 | 0 | 0 |
attr |
JambaMoE.experts |
1 | 0 | 0 |
attr |
JambaMoE.router |
1 | 0 | 0 |
meth |
JambaModel.init |
3 | 2 | 0 |
attr |
JambaModel.config |
1 | 0 | 0 |
attr |
JambaModel.vocab_size |
1 | 0 | 0 |
attr |
JambaModel.embed_tokens |
1 | 0 | 0 |
attr |
JambaModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
JambaModel.final_layernorm |
1 | 0 | 0 |
meth |
JambaForSequenceClassification.init |
3 | 2 | 0 |
attr |
JambaForSequenceClassification.score |
1 | 0 | 0 |
attr |
JambaForSequenceClassification.pooler |
1 | 0 | 0 |
vllm.model_executor.models.jina_vl (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JinaVLForSequenceClassification.init |
3 | 2 | 0 |
meth |
JinaVLForSequenceClassification.load_weights |
2 | 1 | 0 |
attr |
JinaVLForSequenceClassification.weight_mapper |
1 | 0 | 0 |
attr |
JinaVLForSequenceClassification.score |
1 | 0 | 0 |
attr |
JinaVLForSequenceClassification.pooler |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
JinaVLScorer.init |
3 | 2 | 0 |
meth |
JinaVLScorer.forward |
3 | 0 | 0 |
attr |
JinaVLScorer.dense |
1 | 0 | 0 |
attr |
JinaVLScorer.out_proj |
1 | 0 | 0 |
vllm.model_executor.models.kanana_v (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KananaVForConditionalGeneration.init |
3 | 2 | 0 |
meth |
KananaVForConditionalGeneration.forward |
6 | 4 | 0 |
attr |
KananaVForConditionalGeneration.config |
1 | 0 | 0 |
attr |
KananaVForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
KananaVForConditionalGeneration.vision_model |
1 | 0 | 0 |
attr |
KananaVForConditionalGeneration.abstractor |
1 | 0 | 0 |
attr |
KananaVForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
PatchMerge.merge_size |
1 | 0 | 0 |
meth |
DynamicCAbstractor._load_from_state_dict |
4 | 1 | 0 |
attr |
DynamicCAbstractor.config |
1 | 0 | 0 |
attr |
DynamicCAbstractor.merge_size |
1 | 0 | 0 |
attr |
DynamicCAbstractor.pos_emb_size |
1 | 0 | 0 |
attr |
DynamicCAbstractor.num_input_tokens |
1 | 0 | 0 |
attr |
DynamicCAbstractor.pos_emb |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.keye (78 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
smart_resize |
6 | 5 | 0 |
meth |
KeyeVisionEmbeddings.init |
2 | 1 | 0 |
meth |
KeyeVisionEmbeddings.fetch_position_embedding_lfu_cache |
5 | 1 | 0 |
meth |
KeyeVisionEmbeddings.forward |
5 | 4 | 0 |
attr |
KeyeVisionEmbeddings.config |
1 | 0 | 0 |
attr |
KeyeVisionEmbeddings.embed_dim |
1 | 0 | 0 |
attr |
KeyeVisionEmbeddings.image_size |
1 | 0 | 0 |
attr |
KeyeVisionEmbeddings.patch_size |
1 | 0 | 0 |
attr |
KeyeVisionEmbeddings.patch_embedding |
1 | 0 | 0 |
attr |
KeyeVisionEmbeddings.num_patches |
1 | 0 | 0 |
attr |
KeyeVisionEmbeddings.num_positions |
1 | 0 | 0 |
attr |
KeyeVisionEmbeddings.cache_position_embedding |
1 | 0 | 0 |
attr |
KeyeVisionEmbeddings.cache_position_count |
1 | 0 | 0 |
attr |
KeyeVisionEmbeddings.position_embedding |
1 | 0 | 0 |
attr |
KeyeVisionEmbeddings.packing_position_embedding |
1 | 0 | 0 |
meth |
BaseKeyeModule.init |
3 | 2 | 0 |
meth |
BaseKeyeModule._process_image_input |
2 | 2 | 1 |
attr |
BaseKeyeModule.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
BaseKeyeModule.config |
1 | 0 | 0 |
attr |
BaseKeyeModule.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
BaseKeyeModule.visual |
1 | 0 | 0 |
attr |
BaseKeyeModule.mlp_AR |
1 | 0 | 0 |
attr |
BaseKeyeModule.language_model |
1 | 0 | 0 |
meth |
Projector.init |
5 | 4 | 0 |
attr |
Projector.text_config |
1 | 0 | 0 |
attr |
Projector.vision_config |
1 | 0 | 0 |
attr |
Projector.merge_kernel_size |
1 | 0 | 0 |
attr |
Projector.hidden_size |
1 | 0 | 0 |
attr |
Projector.pre_norm |
1 | 0 | 0 |
attr |
Projector.act |
1 | 0 | 0 |
attr |
Projector.linear_1 |
1 | 0 | 0 |
attr |
Projector.linear_2 |
1 | 0 | 0 |
meth |
KeyeSiglipEncoder.init |
4 | 3 | 0 |
meth |
KeyeSiglipEncoder.flatten_list |
2 | 0 | 0 |
meth |
KeyeSiglipEncoder.forward |
12 | 11 | 0 |
attr |
KeyeSiglipEncoder.config |
1 | 0 | 0 |
attr |
KeyeSiglipEncoder.layers |
1 | 0 | 0 |
attr |
KeyeSiglipEncoder.rotary_pos_emb |
1 | 0 | 0 |
meth |
KeyeSiglipAttention.init |
4 | 3 | 0 |
attr |
KeyeSiglipAttention.config |
1 | 0 | 0 |
attr |
KeyeSiglipAttention.hidden_size |
1 | 0 | 0 |
attr |
KeyeSiglipAttention.total_num_heads |
1 | 0 | 0 |
attr |
KeyeSiglipAttention.num_heads |
1 | 0 | 0 |
attr |
KeyeSiglipAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
KeyeSiglipAttention.num_kv_heads |
1 | 0 | 0 |
attr |
KeyeSiglipAttention.head_dim |
1 | 0 | 0 |
attr |
KeyeSiglipAttention.q_size |
1 | 0 | 0 |
attr |
KeyeSiglipAttention.kv_size |
1 | 0 | 0 |
attr |
KeyeSiglipAttention.scale |
1 | 0 | 0 |
attr |
KeyeSiglipAttention.qkv_proj |
1 | 0 | 0 |
attr |
KeyeSiglipAttention.out_proj |
1 | 0 | 0 |
attr |
KeyeSiglipAttention.attn |
1 | 0 | 0 |
attr |
KeyeSiglipAttention.apply_rotary_emb |
1 | 0 | 0 |
meth |
SigLIPRotaryEmbedding.rope_init |
1 | 0 | 0 |
attr |
SigLIPRotaryEmbedding.dim |
1 | 0 | 0 |
attr |
SigLIPRotaryEmbedding.theta |
1 | 0 | 0 |
meth |
KeyeProcessingInfo.get_image_processor |
2 | 1 | 0 |
meth |
KeyeProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
KeyeSiglipVisionTransformer.init |
4 | 3 | 0 |
meth |
KeyeSiglipVisionTransformer.forward |
18 | 17 | 0 |
attr |
KeyeSiglipVisionTransformer.config |
1 | 0 | 0 |
attr |
KeyeSiglipVisionTransformer.embeddings |
1 | 0 | 0 |
attr |
KeyeSiglipVisionTransformer.encoder |
1 | 0 | 0 |
attr |
KeyeSiglipVisionTransformer.post_layernorm |
1 | 0 | 0 |
meth |
KeyeSiglipEncoderLayer.init |
4 | 3 | 0 |
attr |
KeyeSiglipEncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
KeyeSiglipEncoderLayer.layer_norm1 |
1 | 0 | 0 |
attr |
KeyeSiglipEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
KeyeSiglipEncoderLayer.layer_norm2 |
1 | 0 | 0 |
attr |
KeyeSiglipEncoderLayer.mlp |
1 | 0 | 0 |
meth |
KeyeSiglipVisionModel.init |
4 | 3 | 0 |
meth |
KeyeSiglipVisionModel.forward |
13 | 12 | 0 |
attr |
KeyeSiglipVisionModel.vision_model |
1 | 0 | 0 |
attr |
KeyeSiglipVisionModel.quant_config |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.keye_vl1_5 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KeyeVL1_5ForConditionalGeneration.init |
3 | 2 | 0 |
attr |
KeyeVL1_5ForConditionalGeneration.merge_size |
1 | 0 | 0 |
meth |
KeyeVL1_5Projector.init |
5 | 4 | 0 |
attr |
KeyeVL1_5Projector.text_config |
1 | 0 | 0 |
attr |
KeyeVL1_5Projector.vision_config |
1 | 0 | 0 |
attr |
KeyeVL1_5Projector.merge_kernel_size |
1 | 0 | 0 |
attr |
KeyeVL1_5Projector.hidden_size |
1 | 0 | 0 |
attr |
KeyeVL1_5Projector.pre_norm |
1 | 0 | 0 |
attr |
KeyeVL1_5Projector.act |
1 | 0 | 0 |
attr |
KeyeVL1_5Projector.linear_1 |
1 | 0 | 0 |
attr |
KeyeVL1_5Projector.linear_2 |
1 | 0 | 0 |
meth |
KeyeVL1_5ProcessingInfo.get_data_parser |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.kimi_k25 (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KimiK25MultiModalProcessor.split_video_chunks |
2 | 0 | 0 |
meth |
KimiK25DummyInputsBuilder.get_dummy_mm_items |
1 | 0 | 0 |
attr |
KimiK25DummyInputsBuilder.media_token_id |
1 | 0 | 0 |
attr |
KimiK25DummyInputsBuilder.frame_per_chunk |
1 | 0 | 0 |
meth |
KimiK25ForConditionalGeneration._maybe_ignore_quant_config |
2 | 1 | 0 |
meth |
KimiK25ForConditionalGeneration.compute_logits |
3 | 2 | 0 |
meth |
KimiK25ForConditionalGeneration.load_weights |
2 | 1 | 0 |
attr |
KimiK25ForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
KimiK25ForConditionalGeneration.config |
1 | 0 | 0 |
attr |
KimiK25ForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
KimiK25ForConditionalGeneration.hidden_size |
1 | 0 | 0 |
attr |
KimiK25ForConditionalGeneration.device |
1 | 0 | 0 |
attr |
KimiK25ForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
KimiK25ForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
KimiK25ForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
KimiK25ForConditionalGeneration.mm_projector |
1 | 0 | 0 |
attr |
KimiK25ForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
MoonshotKimiVAutoProcessor.init |
4 | 1 | 0 |
meth |
MoonshotKimiVAutoProcessor.call |
4 | 3 | 0 |
attr |
MoonshotKimiVAutoProcessor.media_processor |
1 | 0 | 0 |
attr |
MoonshotKimiVAutoProcessor.media_token_id |
1 | 0 | 0 |
meth |
KimiK25ProcessingInfo.get_hf_processor |
1 | 0 | 0 |
meth |
KimiK25ProcessingInfo.get_hf_config |
1 | 0 | 0 |
attr |
KimiK25ProcessingInfo.hf_config |
1 | 0 | 0 |
attr |
KimiK25ProcessingInfo.media_token_id |
1 | 0 | 0 |
attr |
KimiK25ProcessingInfo.media_processor |
1 | 0 | 0 |
attr |
KimiK25ProcessingInfo.hf_processor |
1 | 0 | 0 |
attr |
KimiK25ProcessingInfo.media_tokens_calculator |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.kimi_k25_vit (73 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_rope_shape_decorate |
2 | 0 | 0 |
func |
get_1d_sincos_pos_embed |
4 | 0 | 0 |
func |
vision_tower_forward |
6 | 6 | 2 |
meth |
KimiK25MultiModalProjector.init |
5 | 4 | 0 |
attr |
KimiK25MultiModalProjector.use_data_parallel |
1 | 0 | 0 |
attr |
KimiK25MultiModalProjector.hidden_size |
1 | 0 | 0 |
attr |
KimiK25MultiModalProjector.pre_norm |
1 | 0 | 0 |
attr |
KimiK25MultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
KimiK25MultiModalProjector.linear_2 |
1 | 0 | 0 |
attr |
KimiK25MultiModalProjector.act |
1 | 0 | 0 |
func |
mm_projector_forward |
3 | 2 | 0 |
func |
get_1d_sincos_pos_embed_from_grid |
3 | 0 | 0 |
meth |
MoonVision3dPatchEmbed.init |
8 | 7 | 0 |
attr |
MoonVision3dPatchEmbed.patch_size |
1 | 0 | 0 |
attr |
MoonVision3dPatchEmbed.proj |
1 | 0 | 0 |
attr |
MoonVision3dPatchEmbed.pos_emb |
1 | 0 | 0 |
meth |
MLP2.init |
7 | 5 | 0 |
attr |
MLP2.use_data_parallel |
1 | 0 | 0 |
attr |
MLP2.fc0 |
1 | 0 | 0 |
attr |
MLP2.fc1 |
1 | 0 | 0 |
attr |
MLP2.activation |
1 | 0 | 0 |
attr |
MoonViT3dEncoder.video_attn_type |
1 | 0 | 0 |
attr |
MoonViT3dEncoder.rope_2d |
1 | 0 | 0 |
attr |
MoonViT3dEncoder.blocks |
1 | 0 | 0 |
attr |
MoonViT3dEncoder.final_layernorm |
1 | 0 | 0 |
meth |
Rope2DPosEmbRepeated.init |
5 | 3 | 0 |
meth |
Rope2DPosEmbRepeated.extra_repr |
1 | 0 | 0 |
attr |
Rope2DPosEmbRepeated.dim |
1 | 0 | 0 |
attr |
Rope2DPosEmbRepeated.max_height |
1 | 0 | 0 |
attr |
Rope2DPosEmbRepeated.max_width |
1 | 0 | 0 |
attr |
Rope2DPosEmbRepeated.theta_base |
1 | 0 | 0 |
meth |
MoonViTEncoderLayer.init |
8 | 6 | 0 |
meth |
MoonViTEncoderLayer.attention_qkvpacked |
4 | 3 | 0 |
meth |
MoonViTEncoderLayer.forward |
4 | 3 | 0 |
attr |
MoonViTEncoderLayer.use_data_parallel |
1 | 0 | 0 |
attr |
MoonViTEncoderLayer.num_heads |
1 | 0 | 0 |
attr |
MoonViTEncoderLayer.hidden_dim |
1 | 0 | 0 |
attr |
MoonViTEncoderLayer.hidden_size_per_attention_head |
1 | 0 | 0 |
attr |
MoonViTEncoderLayer.tp_size |
1 | 0 | 0 |
attr |
MoonViTEncoderLayer.num_attention_heads_per_partition |
1 | 0 | 0 |
attr |
MoonViTEncoderLayer.norm0 |
1 | 0 | 0 |
attr |
MoonViTEncoderLayer.norm1 |
1 | 0 | 0 |
attr |
MoonViTEncoderLayer.mlp |
1 | 0 | 0 |
attr |
MoonViTEncoderLayer.wqkv |
1 | 0 | 0 |
attr |
MoonViTEncoderLayer.wo |
1 | 0 | 0 |
attr |
MoonViTEncoderLayer.attn |
1 | 0 | 0 |
meth |
Learnable2DInterpPosEmbDivided_fixed.reset_parameters |
1 | 0 | 0 |
attr |
Learnable2DInterpPosEmbDivided_fixed.height |
1 | 0 | 0 |
attr |
Learnable2DInterpPosEmbDivided_fixed.width |
1 | 0 | 0 |
attr |
Learnable2DInterpPosEmbDivided_fixed.num_frames |
1 | 0 | 0 |
attr |
Learnable2DInterpPosEmbDivided_fixed.dim |
1 | 0 | 0 |
attr |
Learnable2DInterpPosEmbDivided_fixed.interpolation_mode |
1 | 0 | 0 |
attr |
Learnable2DInterpPosEmbDivided_fixed.weight |
1 | 0 | 0 |
func |
get_rope_shape |
4 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
MoonViT3dPretrainedModel.init |
4 | 3 | 0 |
attr |
MoonViT3dPretrainedModel.config |
1 | 0 | 0 |
attr |
MoonViT3dPretrainedModel.merge_kernel_size |
1 | 0 | 0 |
attr |
MoonViT3dPretrainedModel.patch_size |
1 | 0 | 0 |
attr |
MoonViT3dPretrainedModel.merge_type |
1 | 0 | 0 |
attr |
MoonViT3dPretrainedModel.patch_embed |
1 | 0 | 0 |
attr |
MoonViT3dPretrainedModel.encoder |
1 | 0 | 0 |
vllm.model_executor.models.kimi_linear (57 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KimiMLAAttention.init |
14 | 13 | 0 |
attr |
KimiMLAAttention.hidden_size |
1 | 0 | 0 |
attr |
KimiMLAAttention.qk_nope_head_dim |
1 | 0 | 0 |
attr |
KimiMLAAttention.qk_rope_head_dim |
1 | 0 | 0 |
attr |
KimiMLAAttention.qk_head_dim |
1 | 0 | 0 |
attr |
KimiMLAAttention.v_head_dim |
1 | 0 | 0 |
attr |
KimiMLAAttention.q_lora_rank |
1 | 0 | 0 |
attr |
KimiMLAAttention.kv_lora_rank |
1 | 0 | 0 |
attr |
KimiMLAAttention.num_heads |
1 | 0 | 0 |
attr |
KimiMLAAttention.num_local_heads |
1 | 0 | 0 |
attr |
KimiMLAAttention.scaling |
1 | 0 | 0 |
attr |
KimiMLAAttention.use_nope |
1 | 0 | 0 |
attr |
KimiMLAAttention.kv_a_proj_with_mqa |
1 | 0 | 0 |
attr |
KimiMLAAttention.q_proj |
1 | 0 | 0 |
attr |
KimiMLAAttention.kv_a_layernorm |
1 | 0 | 0 |
attr |
KimiMLAAttention.kv_b_proj |
1 | 0 | 0 |
attr |
KimiMLAAttention.o_proj |
1 | 0 | 0 |
attr |
KimiMLAAttention.mla_attn |
1 | 0 | 0 |
meth |
KimiLinearModel.init |
3 | 2 | 0 |
meth |
KimiLinearModel.forward |
6 | 5 | 0 |
attr |
KimiLinearModel.config |
1 | 0 | 0 |
attr |
KimiLinearModel.vocab_size |
1 | 0 | 0 |
attr |
KimiLinearModel.embed_tokens |
1 | 0 | 0 |
attr |
KimiLinearModel.norm |
1 | 0 | 0 |
meth |
KimiLinearForCausalLM.init |
3 | 2 | 0 |
meth |
KimiLinearForCausalLM.forward |
6 | 5 | 0 |
meth |
KimiLinearForCausalLM.load_weights |
2 | 1 | 0 |
attr |
KimiLinearForCausalLM.model_config |
1 | 0 | 0 |
attr |
KimiLinearForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
KimiLinearForCausalLM.config |
1 | 0 | 0 |
attr |
KimiLinearForCausalLM.quant_config |
1 | 0 | 0 |
attr |
KimiLinearForCausalLM.model |
1 | 0 | 0 |
attr |
KimiLinearForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
KimiLinearForCausalLM.lm_head |
1 | 0 | 0 |
meth |
KimiDecoderLayer.init |
9 | 8 | 0 |
meth |
KimiDecoderLayer.forward |
5 | 4 | 0 |
attr |
KimiDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
KimiDecoderLayer.is_moe |
1 | 0 | 0 |
attr |
KimiDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
KimiDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
KimiDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
KimiDecoderLayer.block_sparse_moe |
1 | 0 | 0 |
attr |
KimiDecoderLayer.mlp |
1 | 0 | 0 |
meth |
KimiMLP.forward |
2 | 0 | 0 |
attr |
KimiMLP.gate_up_proj |
1 | 0 | 0 |
attr |
KimiMLP.down_proj |
1 | 0 | 0 |
attr |
KimiMLP.act_fn |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
KimiMoE.init |
5 | 4 | 0 |
attr |
KimiMoE.tp_size |
1 | 0 | 0 |
attr |
KimiMoE.routed_scaling_factor |
1 | 0 | 0 |
attr |
KimiMoE.num_shared_experts |
1 | 0 | 0 |
attr |
KimiMoE.layer_idx |
1 | 0 | 0 |
attr |
KimiMoE.gate |
1 | 0 | 0 |
attr |
KimiMoE.experts |
1 | 0 | 0 |
attr |
KimiMoE.shared_experts |
1 | 0 | 0 |
vllm.model_executor.models.kimi_vl (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KimiVLForConditionalGeneration.compute_logits |
3 | 2 | 0 |
meth |
KimiVLForConditionalGeneration.load_weights |
2 | 1 | 0 |
attr |
KimiVLForConditionalGeneration.config |
1 | 0 | 0 |
attr |
KimiVLForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
KimiVLForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
KimiVLForConditionalGeneration.hidden_size |
1 | 0 | 0 |
attr |
KimiVLForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
KimiVLForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
KimiVLForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
KimiVLForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
KimiVLMultiModalProjector.init |
3 | 2 | 0 |
attr |
KimiVLMultiModalProjector.use_data_parallel |
1 | 0 | 0 |
attr |
KimiVLMultiModalProjector.hidden_size |
1 | 0 | 0 |
attr |
KimiVLMultiModalProjector.pre_norm |
1 | 0 | 0 |
attr |
KimiVLMultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
KimiVLMultiModalProjector.linear_2 |
1 | 0 | 0 |
attr |
KimiVLMultiModalProjector.act |
1 | 0 | 0 |
meth |
KimiVLProcessingInfo.get_hf_config |
1 | 0 | 0 |
vllm.model_executor.models.lfm2 (49 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2Model.init |
3 | 2 | 0 |
attr |
Lfm2Model.config |
1 | 0 | 0 |
attr |
Lfm2Model.vocab_size |
1 | 0 | 0 |
attr |
Lfm2Model.embed_tokens |
1 | 0 | 0 |
attr |
Lfm2Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Lfm2Model.embedding_norm |
1 | 0 | 0 |
meth |
Lfm2MLP.init |
8 | 7 | 0 |
attr |
Lfm2MLP.w13 |
1 | 0 | 0 |
attr |
Lfm2MLP.w2 |
1 | 0 | 0 |
attr |
Lfm2MLP.act_fn |
1 | 0 | 0 |
meth |
Lfm2AttentionDecoderLayer.forward |
5 | 4 | 0 |
attr |
Lfm2AttentionDecoderLayer.prefix |
1 | 0 | 0 |
attr |
Lfm2AttentionDecoderLayer.config |
1 | 0 | 0 |
attr |
Lfm2AttentionDecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
Lfm2AttentionDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Lfm2AttentionDecoderLayer.feed_forward |
1 | 0 | 0 |
attr |
Lfm2AttentionDecoderLayer.operator_norm |
1 | 0 | 0 |
attr |
Lfm2AttentionDecoderLayer.ffn_norm |
1 | 0 | 0 |
meth |
Lfm2ShortConvDecoderLayer.forward |
4 | 2 | 0 |
attr |
Lfm2ShortConvDecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
Lfm2ShortConvDecoderLayer.short_conv |
1 | 0 | 0 |
attr |
Lfm2ShortConvDecoderLayer.feed_forward |
1 | 0 | 0 |
attr |
Lfm2ShortConvDecoderLayer.operator_norm |
1 | 0 | 0 |
attr |
Lfm2ShortConvDecoderLayer.ffn_norm |
1 | 0 | 0 |
attr |
Lfm2Attention.layer_idx |
1 | 0 | 0 |
attr |
Lfm2Attention.hidden_size |
1 | 0 | 0 |
attr |
Lfm2Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Lfm2Attention.total_num_heads |
1 | 0 | 0 |
attr |
Lfm2Attention.num_heads |
1 | 0 | 0 |
attr |
Lfm2Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Lfm2Attention.head_dim |
1 | 0 | 0 |
attr |
Lfm2Attention.q_size |
1 | 0 | 0 |
attr |
Lfm2Attention.kv_size |
1 | 0 | 0 |
attr |
Lfm2Attention.scaling |
1 | 0 | 0 |
attr |
Lfm2Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
Lfm2Attention.qkv_proj |
1 | 0 | 0 |
attr |
Lfm2Attention.out_proj |
1 | 0 | 0 |
attr |
Lfm2Attention.rotary_emb |
1 | 0 | 0 |
attr |
Lfm2Attention.attn |
1 | 0 | 0 |
attr |
Lfm2Attention.q_layernorm |
1 | 0 | 0 |
attr |
Lfm2Attention.k_layernorm |
1 | 0 | 0 |
meth |
Lfm2ForCausalLM.forward |
6 | 5 | 0 |
attr |
Lfm2ForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Lfm2ForCausalLM.config |
1 | 0 | 0 |
attr |
Lfm2ForCausalLM.model |
1 | 0 | 0 |
attr |
Lfm2ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Lfm2ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Lfm2ForCausalLM.lm_head |
1 | 0 | 0 |
vllm.model_executor.models.lfm2_moe (76 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2MoeForCausalLM.forward |
6 | 5 | 0 |
attr |
Lfm2MoeForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.config |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.model |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.expert_weights |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.moe_layers |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.num_moe_layers |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.num_expert_groups |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.num_shared_experts |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.num_logical_experts |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.num_physical_experts |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.num_local_physical_experts |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.num_routed_experts |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.num_redundant_experts |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Lfm2MoeSparseMoeBlock.init |
5 | 4 | 0 |
attr |
Lfm2MoeSparseMoeBlock.tp_size |
1 | 0 | 0 |
attr |
Lfm2MoeSparseMoeBlock.routed_scaling_factor |
1 | 0 | 0 |
attr |
Lfm2MoeSparseMoeBlock.ep_group |
1 | 0 | 0 |
attr |
Lfm2MoeSparseMoeBlock.ep_rank |
1 | 0 | 0 |
attr |
Lfm2MoeSparseMoeBlock.ep_size |
1 | 0 | 0 |
attr |
Lfm2MoeSparseMoeBlock.n_routed_experts |
1 | 0 | 0 |
attr |
Lfm2MoeSparseMoeBlock.enable_eplb |
1 | 0 | 0 |
attr |
Lfm2MoeSparseMoeBlock.n_logical_experts |
1 | 0 | 0 |
attr |
Lfm2MoeSparseMoeBlock.n_redundant_experts |
1 | 0 | 0 |
attr |
Lfm2MoeSparseMoeBlock.n_physical_experts |
1 | 0 | 0 |
attr |
Lfm2MoeSparseMoeBlock.n_local_physical_experts |
1 | 0 | 0 |
attr |
Lfm2MoeSparseMoeBlock.physical_expert_start |
1 | 0 | 0 |
attr |
Lfm2MoeSparseMoeBlock.physical_expert_end |
1 | 0 | 0 |
attr |
Lfm2MoeSparseMoeBlock.gate |
1 | 0 | 0 |
attr |
Lfm2MoeSparseMoeBlock.experts |
1 | 0 | 0 |
meth |
Lfm2MoeModel.init |
3 | 2 | 0 |
attr |
Lfm2MoeModel.num_redundant_experts |
1 | 0 | 0 |
attr |
Lfm2MoeModel.config |
1 | 0 | 0 |
attr |
Lfm2MoeModel.vocab_size |
1 | 0 | 0 |
attr |
Lfm2MoeModel.embed_tokens |
1 | 0 | 0 |
attr |
Lfm2MoeModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Lfm2MoeModel.embedding_norm |
1 | 0 | 0 |
meth |
Lfm2MoeMlp.init |
5 | 4 | 0 |
attr |
Lfm2MoeMlp.w13 |
1 | 0 | 0 |
attr |
Lfm2MoeMlp.w2 |
1 | 0 | 0 |
attr |
Lfm2MoeMlp.act_fn |
1 | 0 | 0 |
meth |
Lfm2MoeAttentionDecoderLayer.forward |
5 | 4 | 0 |
attr |
Lfm2MoeAttentionDecoderLayer.prefix |
1 | 0 | 0 |
attr |
Lfm2MoeAttentionDecoderLayer.config |
1 | 0 | 0 |
attr |
Lfm2MoeAttentionDecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
Lfm2MoeAttentionDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Lfm2MoeAttentionDecoderLayer.operator_norm |
1 | 0 | 0 |
attr |
Lfm2MoeAttentionDecoderLayer.ffn_norm |
1 | 0 | 0 |
attr |
Lfm2MoeAttentionDecoderLayer.feed_forward |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.layer_idx |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.hidden_size |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.num_kv_heads |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.total_num_heads |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.num_heads |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.head_dim |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.q_size |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.kv_size |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.scaling |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.qkv_proj |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.out_proj |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.rotary_emb |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.attn |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.q_layernorm |
1 | 0 | 0 |
attr |
Lfm2MoeAttention.k_layernorm |
1 | 0 | 0 |
meth |
Lfm2MoeShortConvDecoderLayer.forward |
4 | 2 | 0 |
attr |
Lfm2MoeShortConvDecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
Lfm2MoeShortConvDecoderLayer.short_conv |
1 | 0 | 0 |
attr |
Lfm2MoeShortConvDecoderLayer.operator_norm |
1 | 0 | 0 |
attr |
Lfm2MoeShortConvDecoderLayer.ffn_norm |
1 | 0 | 0 |
attr |
Lfm2MoeShortConvDecoderLayer.feed_forward |
1 | 0 | 0 |
vllm.model_executor.models.lfm2_siglip2 (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Siglip2Model.init |
6 | 5 | 0 |
attr |
Siglip2Model.vision_model |
1 | 0 | 0 |
meth |
Siglip2EncoderLayer.init |
4 | 3 | 0 |
attr |
Siglip2EncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
Siglip2EncoderLayer.layer_norm1 |
1 | 0 | 0 |
attr |
Siglip2EncoderLayer.self_attn |
1 | 0 | 0 |
attr |
Siglip2EncoderLayer.layer_norm2 |
1 | 0 | 0 |
attr |
Siglip2EncoderLayer.mlp |
1 | 0 | 0 |
meth |
Siglip2VisionEmbeddings.init |
2 | 1 | 0 |
attr |
Siglip2VisionEmbeddings.config |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.embed_dim |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.patch_size |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.patch_embedding |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.num_patches |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.position_embedding_size |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.position_embedding |
1 | 0 | 0 |
meth |
Siglip2Encoder.init |
5 | 4 | 0 |
attr |
Siglip2Encoder.config |
1 | 0 | 0 |
attr |
Siglip2Encoder.layers |
1 | 0 | 0 |
meth |
Siglip2Attention.init |
4 | 3 | 0 |
attr |
Siglip2Attention.config |
1 | 0 | 0 |
attr |
Siglip2Attention.embed_dim |
1 | 0 | 0 |
attr |
Siglip2Attention.num_heads |
1 | 0 | 0 |
attr |
Siglip2Attention.head_dim |
1 | 0 | 0 |
attr |
Siglip2Attention.scale |
1 | 0 | 0 |
attr |
Siglip2Attention.dropout |
1 | 0 | 0 |
attr |
Siglip2Attention.num_heads_per_partition |
1 | 0 | 0 |
attr |
Siglip2Attention.qkv_proj |
1 | 0 | 0 |
attr |
Siglip2Attention.out_proj |
1 | 0 | 0 |
attr |
Siglip2Attention.attn |
1 | 0 | 0 |
meth |
Siglip2MLP.init |
4 | 3 | 0 |
attr |
Siglip2MLP.config |
1 | 0 | 0 |
attr |
Siglip2MLP.activation_fn |
1 | 0 | 0 |
attr |
Siglip2MLP.fc1 |
1 | 0 | 0 |
attr |
Siglip2MLP.fc2 |
1 | 0 | 0 |
meth |
Siglip2VisionTransformer.init |
6 | 5 | 0 |
meth |
Siglip2VisionTransformer.get_input_embeddings |
1 | 0 | 0 |
attr |
Siglip2VisionTransformer.config |
1 | 0 | 0 |
attr |
Siglip2VisionTransformer.embeddings |
1 | 0 | 0 |
attr |
Siglip2VisionTransformer.encoder |
1 | 0 | 0 |
attr |
Siglip2VisionTransformer.post_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.lfm2_vl (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2VLMultiModalProjector.init |
3 | 2 | 0 |
attr |
Lfm2VLMultiModalProjector.use_data_parallel |
1 | 0 | 0 |
attr |
Lfm2VLMultiModalProjector.factor |
1 | 0 | 0 |
attr |
Lfm2VLMultiModalProjector.projector_use_layernorm |
1 | 0 | 0 |
attr |
Lfm2VLMultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
Lfm2VLMultiModalProjector.act |
1 | 0 | 0 |
attr |
Lfm2VLMultiModalProjector.linear_2 |
1 | 0 | 0 |
attr |
Lfm2VLMultiModalProjector.layer_norm |
1 | 0 | 0 |
meth |
Lfm2VLProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Lfm2VLProcessingInfo.get_hf_processor |
2 | 0 | 0 |
meth |
Lfm2VLForConditionalGeneration.init |
3 | 2 | 0 |
attr |
Lfm2VLForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Lfm2VLForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Lfm2VLForConditionalGeneration.vllm_config |
1 | 0 | 0 |
attr |
Lfm2VLForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Lfm2VLForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
Lfm2VLForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Lfm2VLForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
Lfm2VLForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
Lfm2VLForConditionalGeneration.vision_tower |
1 | 0 | 0 |
vllm.model_executor.models.lightonocr (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
LightOnOCRForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
LightOnOCRForConditionalGeneration.config |
1 | 0 | 0 |
attr |
LightOnOCRForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
LightOnOCRForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
LightOnOCRForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
LightOnOCRForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
LightOnOCRForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.llama (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
LlamaDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
LlamaDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
LlamaDecoderLayer.mlp |
1 | 0 | 0 |
attr |
LlamaDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
LlamaDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
func |
llama_model_invariants |
5 | 0 | 0 |
attr |
LlamaAttention.hidden_size |
1 | 0 | 0 |
attr |
LlamaAttention.total_num_heads |
1 | 0 | 0 |
attr |
LlamaAttention.num_heads |
1 | 0 | 0 |
attr |
LlamaAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
LlamaAttention.num_kv_heads |
1 | 0 | 0 |
attr |
LlamaAttention.head_dim |
1 | 0 | 0 |
attr |
LlamaAttention.q_size |
1 | 0 | 0 |
attr |
LlamaAttention.kv_size |
1 | 0 | 0 |
attr |
LlamaAttention.scaling |
1 | 0 | 0 |
attr |
LlamaAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
LlamaAttention.qkv_proj |
1 | 0 | 0 |
attr |
LlamaAttention.o_proj |
1 | 0 | 0 |
attr |
LlamaAttention.attn |
1 | 0 | 0 |
meth |
LlamaModel.init |
4 | 3 | 0 |
meth |
LlamaModel.forward |
6 | 5 | 0 |
attr |
LlamaModel.config |
1 | 0 | 0 |
attr |
LlamaModel.quant_config |
1 | 0 | 0 |
attr |
LlamaModel.vocab_size |
1 | 0 | 0 |
attr |
LlamaModel.aux_hidden_state_layers |
1 | 0 | 0 |
attr |
LlamaModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
LlamaModel.embed_tokens |
1 | 0 | 0 |
attr |
LlamaModel.norm |
1 | 0 | 0 |
meth |
LlamaForCausalLM.init |
4 | 3 | 0 |
meth |
LlamaForCausalLM._init_model |
4 | 3 | 0 |
attr |
LlamaForCausalLM.config |
1 | 0 | 0 |
attr |
LlamaForCausalLM.model |
1 | 0 | 0 |
attr |
LlamaForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
LlamaForCausalLM.lm_head |
1 | 0 | 0 |
attr |
LlamaForCausalLM.logits_processor |
1 | 0 | 0 |
meth |
LlamaMLP.forward |
2 | 0 | 0 |
attr |
LlamaMLP.gate_up_proj |
1 | 0 | 0 |
attr |
LlamaMLP.down_proj |
1 | 0 | 0 |
attr |
LlamaMLP.act_fn |
1 | 0 | 0 |
vllm.model_executor.models.llama4 (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Llama4MoE.init |
3 | 2 | 0 |
meth |
Llama4MoE.forward |
2 | 0 | 0 |
attr |
Llama4MoE.tp_size |
1 | 0 | 0 |
attr |
Llama4MoE.top_k |
1 | 0 | 0 |
attr |
Llama4MoE.is_sequence_parallel |
1 | 0 | 0 |
attr |
Llama4MoE.ep_group |
1 | 0 | 0 |
attr |
Llama4MoE.ep_rank |
1 | 0 | 0 |
attr |
Llama4MoE.ep_size |
1 | 0 | 0 |
attr |
Llama4MoE.router |
1 | 0 | 0 |
attr |
Llama4MoE.shared_expert |
1 | 0 | 0 |
attr |
Llama4MoE.enable_eplb |
1 | 0 | 0 |
attr |
Llama4MoE.n_redundant_experts |
1 | 0 | 0 |
attr |
Llama4MoE.n_logical_experts |
1 | 0 | 0 |
attr |
Llama4MoE.n_physical_experts |
1 | 0 | 0 |
attr |
Llama4MoE.n_local_physical_experts |
1 | 0 | 0 |
attr |
Llama4MoE.experts |
1 | 0 | 0 |
attr |
Llama4Attention.layer_idx |
1 | 0 | 0 |
attr |
Llama4Attention.hidden_size |
1 | 0 | 0 |
attr |
Llama4Attention.no_rope_layers |
1 | 0 | 0 |
attr |
Llama4Attention.nope |
1 | 0 | 0 |
attr |
Llama4Attention.use_qk_norm |
1 | 0 | 0 |
attr |
Llama4Attention.total_num_heads |
1 | 0 | 0 |
attr |
Llama4Attention.num_heads |
1 | 0 | 0 |
attr |
Llama4Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Llama4Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Llama4Attention.head_dim |
1 | 0 | 0 |
attr |
Llama4Attention.q_size |
1 | 0 | 0 |
attr |
Llama4Attention.kv_size |
1 | 0 | 0 |
attr |
Llama4Attention.scaling |
1 | 0 | 0 |
attr |
Llama4Attention.attn_temperature_tuning |
1 | 0 | 0 |
attr |
Llama4Attention.floor_scale |
1 | 0 | 0 |
attr |
Llama4Attention.attn_scale |
1 | 0 | 0 |
attr |
Llama4Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
Llama4Attention.n_rep |
1 | 0 | 0 |
attr |
Llama4Attention.qk_norm |
1 | 0 | 0 |
attr |
Llama4Attention.qkv_proj |
1 | 0 | 0 |
attr |
Llama4Attention.o_proj |
1 | 0 | 0 |
attr |
Llama4Attention.rotary_emb |
1 | 0 | 0 |
attr |
Llama4Attention.attn |
1 | 0 | 0 |
meth |
Llama4ForCausalLM.init |
3 | 2 | 0 |
meth |
Llama4ForCausalLM.set_moe_parameters |
1 | 0 | 0 |
meth |
Llama4ForCausalLM._init_model |
4 | 3 | 0 |
meth |
Llama4Model.init |
4 | 3 | 0 |
attr |
Llama4Model.num_experts |
1 | 0 | 0 |
attr |
Llama4Model.n_redundant_experts |
1 | 0 | 0 |
attr |
Llama4DecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
Llama4DecoderLayer.global_layer |
1 | 0 | 0 |
attr |
Llama4DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Llama4DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Llama4DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Llama4DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Llama4DecoderLayer.feed_forward |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.llama4_eagle (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
EagleLlama4ForCausalLM.init |
3 | 2 | 0 |
attr |
EagleLlama4ForCausalLM.config |
1 | 0 | 0 |
attr |
EagleLlama4ForCausalLM.model |
1 | 0 | 0 |
attr |
EagleLlama4ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
EagleLlama4ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
LlamaModel.config |
1 | 0 | 0 |
attr |
LlamaModel.vocab_size |
1 | 0 | 0 |
attr |
LlamaModel.embed_tokens |
1 | 0 | 0 |
attr |
LlamaModel.fc |
1 | 0 | 0 |
attr |
LlamaModel.norm |
1 | 0 | 0 |
attr |
LlamaModel.layers |
1 | 0 | 0 |
vllm.model_executor.models.llama_eagle (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
LlamaDecoderLayer.input_layernorm |
1 | 0 | 0 |
meth |
EagleLlamaForCausalLM.init |
3 | 2 | 0 |
meth |
EagleLlamaForCausalLM.load_weights |
2 | 1 | 0 |
attr |
EagleLlamaForCausalLM.config |
1 | 0 | 0 |
attr |
EagleLlamaForCausalLM.model |
1 | 0 | 0 |
attr |
EagleLlamaForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
LlamaModel.config |
1 | 0 | 0 |
attr |
LlamaModel.vocab_size |
1 | 0 | 0 |
attr |
LlamaModel.quant_config |
1 | 0 | 0 |
attr |
LlamaModel.embed_tokens |
1 | 0 | 0 |
attr |
LlamaModel.layers |
1 | 0 | 0 |
attr |
LlamaModel.fc |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.llama_eagle3 (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
LlamaDecoderLayer.hidden_norm |
1 | 0 | 0 |
attr |
LlamaDecoderLayer.layer_idx |
1 | 0 | 0 |
meth |
Eagle3LlamaForCausalLM.init |
3 | 2 | 0 |
meth |
Eagle3LlamaForCausalLM.load_weights |
2 | 1 | 0 |
attr |
Eagle3LlamaForCausalLM.config |
1 | 0 | 0 |
attr |
Eagle3LlamaForCausalLM.model |
1 | 0 | 0 |
attr |
Eagle3LlamaForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Eagle3LlamaForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Eagle3LlamaForCausalLM.draft_id_to_target_id |
1 | 0 | 0 |
attr |
Eagle3LlamaForCausalLM.use_parallel_drafting |
1 | 0 | 0 |
attr |
LlamaModel.config |
1 | 0 | 0 |
attr |
LlamaModel.vocab_size |
1 | 0 | 0 |
attr |
LlamaModel.quant_config |
1 | 0 | 0 |
attr |
LlamaModel.embed_tokens |
1 | 0 | 0 |
attr |
LlamaModel.layers |
1 | 0 | 0 |
attr |
LlamaModel.norm |
1 | 0 | 0 |
attr |
LlamaModel.use_aux_hidden_state |
1 | 0 | 0 |
attr |
LlamaModel.fc |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.llava (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PixtralHFProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
BaseLlavaProcessingInfo.get_vision_encoder_info |
1 | 0 | 0 |
attr |
LlavaForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
LlavaForConditionalGeneration.config |
1 | 0 | 0 |
attr |
LlavaForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
LlavaForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
LlavaForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
LlavaForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
LlavaForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
LlavaMultiModalProjector.__init__ |
7 | 6 | 0 |
attr |
LlavaMultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
LlavaMultiModalProjector.act |
1 | 0 | 0 |
attr |
LlavaMultiModalProjector.linear_2 |
1 | 0 | 0 |
meth |
MantisProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
LlavaProcessingInfo.get_hf_processor |
2 | 1 | 0 |
vllm.model_executor.models.llava_next (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaNextProcessingInfo.get_hf_processor |
2 | 1 | 0 |
attr |
LlavaNextForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
LlavaNextForConditionalGeneration.config |
1 | 0 | 0 |
attr |
LlavaNextForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
LlavaNextForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
LlavaNextForConditionalGeneration.select_layers |
1 | 0 | 0 |
attr |
LlavaNextForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
LlavaNextForConditionalGeneration.image_newline |
1 | 0 | 0 |
attr |
LlavaNextForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
LlavaNextForConditionalGeneration.language_model |
1 | 0 | 0 |
vllm.model_executor.models.llava_next_video (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaNextVideoForConditionalGeneration._process_video_pixels |
2 | 1 | 0 |
attr |
LlavaNextVideoForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
LlavaNextVideoForConditionalGeneration.config |
1 | 0 | 0 |
attr |
LlavaNextVideoForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
LlavaNextVideoForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
LlavaNextVideoForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
LlavaNextVideoForConditionalGeneration.vision_resampler |
1 | 0 | 0 |
attr |
LlavaNextVideoForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
LlavaNextVideoForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
LlavaNextVideoProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
LlavaNextVideoProcessingInfo.get_vision_encoder_info |
1 | 0 | 0 |
meth |
LlavaNextVideoProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
LlavaNextVideoPooler.__init__ |
2 | 1 | 0 |
meth |
LlavaNextVideoPooler.forward |
2 | 1 | 0 |
attr |
LlavaNextVideoPooler.image_size |
1 | 0 | 0 |
attr |
LlavaNextVideoPooler.pool |
1 | 0 | 0 |
meth |
LlavaNextMultiModalProjector.__init__ |
5 | 4 | 0 |
attr |
LlavaNextMultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
LlavaNextMultiModalProjector.act |
1 | 0 | 0 |
attr |
LlavaNextMultiModalProjector.linear_2 |
1 | 0 | 0 |
vllm.model_executor.models.llava_onevision (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaOnevisionMultiModalProjector.__init__ |
2 | 1 | 0 |
attr |
LlavaOnevisionMultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
LlavaOnevisionMultiModalProjector.act |
1 | 0 | 0 |
attr |
LlavaOnevisionMultiModalProjector.linear_2 |
1 | 0 | 0 |
meth |
LlavaOnevisionProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
LlavaOnevisionForConditionalGeneration._merge_image_patch_embeddings |
6 | 4 | 0 |
meth |
LlavaOnevisionForConditionalGeneration._process_video_pixels |
2 | 1 | 0 |
meth |
LlavaOnevisionForConditionalGeneration.apply_pooling |
3 | 2 | 0 |
attr |
LlavaOnevisionForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
LlavaOnevisionForConditionalGeneration.config |
1 | 0 | 0 |
attr |
LlavaOnevisionForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
LlavaOnevisionForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
LlavaOnevisionForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
LlavaOnevisionForConditionalGeneration.image_newline |
1 | 0 | 0 |
attr |
LlavaOnevisionForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
LlavaOnevisionForConditionalGeneration.language_model |
1 | 0 | 0 |
vllm.model_executor.models.longcat_flash (104 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
attr |
FlashMLP.gate_up_proj |
1 | 0 | 0 |
attr |
FlashMLP.down_proj |
1 | 0 | 0 |
attr |
FlashMLP.act_fn |
1 | 0 | 0 |
attr |
FlashDecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
FlashDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
FlashDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
FlashDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
FlashDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
FlashDecoderLayer.mlps |
1 | 0 | 0 |
attr |
FlashDecoderLayer.mlp |
1 | 0 | 0 |
meth |
LongcatMoe.__init__ |
10 | 9 | 0 |
attr |
LongcatMoe.hidden_size |
1 | 0 | 0 |
attr |
LongcatMoe.rounter_params_dtype |
1 | 0 | 0 |
attr |
LongcatMoe.router |
1 | 0 | 0 |
attr |
LongcatMoe.experts |
1 | 0 | 0 |
meth |
LongcatFlashForCausalLM.__init__ |
3 | 2 | 0 |
attr |
LongcatFlashForCausalLM.config |
1 | 0 | 0 |
attr |
LongcatFlashForCausalLM.quant_config |
1 | 0 | 0 |
attr |
LongcatFlashForCausalLM.model |
1 | 0 | 0 |
attr |
LongcatFlashForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
LongcatFlashForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
LongcatFlashForCausalLM.lm_head |
1 | 0 | 0 |
meth |
FlashModel.__init__ |
3 | 2 | 0 |
attr |
FlashModel.config |
1 | 0 | 0 |
attr |
FlashModel.vocab_size |
1 | 0 | 0 |
attr |
FlashModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
FlashModel.embed_tokens |
1 | 0 | 0 |
attr |
FlashModel.norm |
1 | 0 | 0 |
meth |
FlashConfig.__init__ |
40 | 0 | 0 |
attr |
FlashConfig.vocab_size |
1 | 0 | 0 |
attr |
FlashConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
FlashConfig.hidden_size |
1 | 0 | 0 |
attr |
FlashConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
FlashConfig.num_attention_heads |
1 | 0 | 0 |
attr |
FlashConfig.ep_size |
1 | 0 | 0 |
attr |
FlashConfig.kv_lora_rank |
1 | 0 | 0 |
attr |
FlashConfig.q_lora_rank |
1 | 0 | 0 |
attr |
FlashConfig.qk_rope_head_dim |
1 | 0 | 0 |
attr |
FlashConfig.v_head_dim |
1 | 0 | 0 |
attr |
FlashConfig.qk_nope_head_dim |
1 | 0 | 0 |
attr |
FlashConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
FlashConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
FlashConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
FlashConfig.initializer_range |
1 | 0 | 0 |
attr |
FlashConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
FlashConfig.pretraining_tp |
1 | 0 | 0 |
attr |
FlashConfig.use_cache |
1 | 0 | 0 |
attr |
FlashConfig.rope_parameters |
1 | 0 | 0 |
attr |
FlashConfig.attention_bias |
1 | 0 | 0 |
attr |
FlashConfig.attention_dropout |
1 | 0 | 0 |
attr |
FlashConfig.mla_scale_q_lora |
1 | 0 | 0 |
attr |
FlashConfig.mla_scale_kv_lora |
1 | 0 | 0 |
attr |
FlashConfig.zero_expert_num |
1 | 0 | 0 |
attr |
FlashConfig.zero_expert_type |
1 | 0 | 0 |
attr |
FlashConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
FlashConfig.hidden_act |
1 | 0 | 0 |
attr |
FlashConfig.intermediate_size |
1 | 0 | 0 |
attr |
FlashConfig.moe_intermediate_size |
1 | 0 | 0 |
meth |
LongcatRouter.__init__ |
5 | 4 | 0 |
meth |
LongcatRouter.forward |
2 | 0 | 0 |
attr |
LongcatRouter.n_routed_experts |
1 | 0 | 0 |
attr |
LongcatRouter.classifier |
1 | 0 | 0 |
attr |
LongcatRouter.e_score_correction_bias |
1 | 0 | 0 |
vllm.model_executor.models.longcat_flash_mtp (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LongCatFlashMTP.__init__ |
3 | 2 | 0 |
attr |
LongCatFlashMTP.config |
1 | 0 | 0 |
attr |
LongCatFlashMTP.quant_config |
1 | 0 | 0 |
attr |
LongCatFlashMTP.model |
1 | 0 | 0 |
attr |
LongCatFlashMTP.lm_head |
1 | 0 | 0 |
attr |
LongCatFlashMTP.logits_processor |
1 | 0 | 0 |
attr |
LongCatMultiTokenPredictorLayer.enorm |
1 | 0 | 0 |
attr |
LongCatMultiTokenPredictorLayer.hnorm |
1 | 0 | 0 |
attr |
LongCatMultiTokenPredictorLayer.eh_proj |
1 | 0 | 0 |
attr |
LongCatMultiTokenPredictorLayer.mtp_block |
1 | 0 | 0 |
attr |
LongCatMultiTokenPredictorLayer.final_layernorm |
1 | 0 | 0 |
meth |
LongCatMultiTokenPredictor.__init__ |
4 | 3 | 0 |
attr |
LongCatMultiTokenPredictor.mtp_start_layer_idx |
1 | 0 | 0 |
attr |
LongCatMultiTokenPredictor.num_mtp_layers |
1 | 0 | 0 |
attr |
LongCatMultiTokenPredictor.layers |
1 | 0 | 0 |
attr |
LongCatMultiTokenPredictor.embed_tokens |
1 | 0 | 0 |
vllm.model_executor.models.mamba (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MambaForCausalLM.__init__ |
3 | 2 | 0 |
meth |
MambaForCausalLM.forward |
6 | 4 | 0 |
meth |
MambaForCausalLM.copy_inputs_before_cuda_graphs |
3 | 0 | 0 |
meth |
MambaForCausalLM.get_seqlen_agnostic_capture_inputs |
2 | 1 | 0 |
attr |
MambaForCausalLM.scheduler_config |
1 | 0 | 0 |
attr |
MambaForCausalLM.config |
1 | 0 | 0 |
attr |
MambaForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
MambaForCausalLM.model_config |
1 | 0 | 0 |
attr |
MambaForCausalLM.backbone |
1 | 0 | 0 |
attr |
MambaForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
MambaForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
MambaForCausalLM.lm_head |
1 | 0 | 0 |
meth |
MambaModel.__init__ |
3 | 2 | 0 |
attr |
MambaModel.config |
1 | 0 | 0 |
attr |
MambaModel.vocab_size |
1 | 0 | 0 |
attr |
MambaModel.embeddings |
1 | 0 | 0 |
attr |
MambaModel.norm_f |
1 | 0 | 0 |
attr |
MambaModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
MambaDecoderLayer.forward |
4 | 2 | 0 |
attr |
MambaDecoderLayer.config |
1 | 0 | 0 |
attr |
MambaDecoderLayer.is_falcon_mamba |
1 | 0 | 0 |
attr |
MambaDecoderLayer.is_lora_enabled |
1 | 0 | 0 |
attr |
MambaDecoderLayer.mixer |
1 | 0 | 0 |
attr |
MambaDecoderLayer.norm |
1 | 0 | 0 |
vllm.model_executor.models.mamba2 (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Mamba2DecoderLayer.forward |
4 | 2 | 0 |
attr |
Mamba2DecoderLayer.config |
1 | 0 | 0 |
attr |
Mamba2DecoderLayer.mixer |
1 | 0 | 0 |
attr |
Mamba2DecoderLayer.norm |
1 | 0 | 0 |
meth |
Mamba2Model.__init__ |
3 | 2 | 0 |
attr |
Mamba2Model.config |
1 | 0 | 0 |
attr |
Mamba2Model.vocab_size |
1 | 0 | 0 |
attr |
Mamba2Model.embeddings |
1 | 0 | 0 |
attr |
Mamba2Model.norm_f |
1 | 0 | 0 |
attr |
Mamba2Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
Mamba2ForCausalLM.__init__ |
3 | 2 | 0 |
meth |
Mamba2ForCausalLM.forward |
6 | 4 | 0 |
meth |
Mamba2ForCausalLM.copy_inputs_before_cuda_graphs |
3 | 0 | 0 |
meth |
Mamba2ForCausalLM.get_seqlen_agnostic_capture_inputs |
2 | 1 | 0 |
attr |
Mamba2ForCausalLM.config |
1 | 0 | 0 |
attr |
Mamba2ForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
Mamba2ForCausalLM.scheduler_config |
1 | 0 | 0 |
attr |
Mamba2ForCausalLM.model_config |
1 | 0 | 0 |
attr |
Mamba2ForCausalLM.backbone |
1 | 0 | 0 |
attr |
Mamba2ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Mamba2ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Mamba2ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.medusa (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Medusa.config |
1 | 0 | 0 |
attr |
Medusa.blocks |
1 | 0 | 0 |
attr |
Medusa.orig_vocab_size |
1 | 0 | 0 |
attr |
Medusa.truncated_vocab_size |
1 | 0 | 0 |
attr |
Medusa.logits_processor |
1 | 0 | 0 |
attr |
Medusa.token_map |
1 | 0 | 0 |
attr |
Medusa.lm_head |
1 | 0 | 0 |
attr |
Medusa.lm_heads |
1 | 0 | 0 |
attr |
ResidualBlock.layers |
1 | 0 | 0 |
attr |
ResidualBlock.act |
1 | 0 | 0 |
vllm.model_executor.models.midashenglm (70 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiDashengLMModel.__init__ |
3 | 2 | 0 |
attr |
MiDashengLMModel.config |
1 | 0 | 0 |
attr |
MiDashengLMModel.quant_config |
1 | 0 | 0 |
attr |
MiDashengLMModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
MiDashengLMModel.audio_encoder |
1 | 0 | 0 |
attr |
MiDashengLMModel.audio_projector |
1 | 0 | 0 |
attr |
MiDashengLMModel.decoder |
1 | 0 | 0 |
meth |
MiDashengLMProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
MiDashengLMProcessingInfo.get_feature_extractor |
1 | 0 | 0 |
meth |
MiDashengLMProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
MiDashengLMProcessingInfo.get_min_audio_len |
1 | 0 | 0 |
meth |
MiDashengLMProcessingInfo.get_max_audio_len |
1 | 0 | 0 |
meth |
DashengAttention.__init__ |
6 | 5 | 0 |
meth |
DashengAttention.forward |
3 | 2 | 0 |
attr |
DashengAttention.embed_dim |
1 | 0 | 0 |
attr |
DashengAttention.total_num_heads |
1 | 0 | 0 |
attr |
DashengAttention.num_heads |
1 | 0 | 0 |
attr |
DashengAttention.num_kv_heads |
1 | 0 | 0 |
attr |
DashengAttention.head_dim |
1 | 0 | 0 |
attr |
DashengAttention.q_size |
1 | 0 | 0 |
attr |
DashengAttention.kv_size |
1 | 0 | 0 |
attr |
DashengAttention.scale |
1 | 0 | 0 |
attr |
DashengAttention.qkv |
1 | 0 | 0 |
attr |
DashengAttention.proj |
1 | 0 | 0 |
meth |
AudioProjectorSubsample.__init__ |
7 | 5 | 0 |
meth |
AudioProjectorSubsample.forward |
3 | 0 | 0 |
attr |
AudioProjectorSubsample.k |
1 | 0 | 0 |
attr |
AudioProjectorSubsample.net |
1 | 0 | 0 |
meth |
DashengBlock.__init__ |
8 | 7 | 0 |
attr |
DashengBlock.norm1 |
1 | 0 | 0 |
attr |
DashengBlock.attn |
1 | 0 | 0 |
attr |
DashengBlock.ls1 |
1 | 0 | 0 |
attr |
DashengBlock.norm2 |
1 | 0 | 0 |
attr |
DashengBlock.mlp |
1 | 0 | 0 |
attr |
DashengBlock.ls2 |
1 | 0 | 0 |
func |
calculate_mel_frames_dasheng |
7 | 6 | 0 |
meth |
DashengMlp.__init__ |
6 | 5 | 0 |
attr |
DashengMlp.fc1 |
1 | 0 | 0 |
attr |
DashengMlp.act |
1 | 0 | 0 |
attr |
DashengMlp.fc2 |
1 | 0 | 0 |
meth |
AudioPatchEmbed.__init__ |
8 | 7 | 0 |
attr |
AudioPatchEmbed.input_size |
1 | 0 | 0 |
attr |
AudioPatchEmbed.patch_size |
1 | 0 | 0 |
attr |
AudioPatchEmbed.patch_stride |
1 | 0 | 0 |
attr |
AudioPatchEmbed.grid_size |
1 | 0 | 0 |
attr |
AudioPatchEmbed.num_patches |
1 | 0 | 0 |
attr |
AudioPatchEmbed.flatten |
1 | 0 | 0 |
attr |
AudioPatchEmbed.proj |
1 | 0 | 0 |
attr |
AudioPatchEmbed.norm |
1 | 0 | 0 |
meth |
LayerScale.__init__ |
4 | 0 | 0 |
attr |
LayerScale.inplace |
1 | 0 | 0 |
attr |
LayerScale.gamma |
1 | 0 | 0 |
meth |
DashengFrontend.__init__ |
2 | 1 | 0 |
attr |
DashengFrontend.config |
1 | 0 | 0 |
meth |
DashengAudioTransformer.__init__ |
4 | 3 | 0 |
attr |
DashengAudioTransformer.target_length |
1 | 0 | 0 |
attr |
DashengAudioTransformer.hop_length |
1 | 0 | 0 |
attr |
DashengAudioTransformer.front_end |
1 | 0 | 0 |
attr |
DashengAudioTransformer.init_bn |
1 | 0 | 0 |
attr |
DashengAudioTransformer.patch_embed |
1 | 0 | 0 |
attr |
DashengAudioTransformer.time_pos_embed |
1 | 0 | 0 |
attr |
DashengAudioTransformer.freq_pos_embed |
1 | 0 | 0 |
attr |
DashengAudioTransformer.blocks |
1 | 0 | 0 |
attr |
DashengAudioTransformer.norm |
1 | 0 | 0 |
vllm.model_executor.models.mimo (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
MiMoForCausalLM.__init__ |
3 | 2 | 0 |
attr |
MiMoForCausalLM.config |
1 | 0 | 0 |
attr |
MiMoForCausalLM.quant_config |
1 | 0 | 0 |
attr |
MiMoForCausalLM.model |
1 | 0 | 0 |
attr |
MiMoForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
MiMoForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
MiMoForCausalLM.lm_head |
1 | 0 | 0 |
vllm.model_executor.models.mimo_mtp (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiMoMultiTokenPredictor.__init__ |
3 | 2 | 0 |
attr |
MiMoMultiTokenPredictor.mtp_start_layer_idx |
1 | 0 | 0 |
attr |
MiMoMultiTokenPredictor.num_mtp_layers |
1 | 0 | 0 |
attr |
MiMoMultiTokenPredictor.embed_tokens |
1 | 0 | 0 |
attr |
MiMoMultiTokenPredictor.mtp_layers |
1 | 0 | 0 |
attr |
MiMoMultiTokenPredictor.logits_processor |
1 | 0 | 0 |
meth |
MiMoMTP.__init__ |
3 | 2 | 0 |
attr |
MiMoMTP.config |
1 | 0 | 0 |
attr |
MiMoMTP.model |
1 | 0 | 0 |
attr |
MiMoMTP.lm_head |
1 | 0 | 0 |
attr |
MiMoMultiTokenPredictorLayer.token_layernorm |
1 | 0 | 0 |
attr |
MiMoMultiTokenPredictorLayer.hidden_layernorm |
1 | 0 | 0 |
attr |
MiMoMultiTokenPredictorLayer.input_proj |
1 | 0 | 0 |
attr |
MiMoMultiTokenPredictorLayer.mtp_block |
1 | 0 | 0 |
attr |
MiMoMultiTokenPredictorLayer.final_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.mimo_v2_flash (66 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
MiMoV2FlashForCausalLM.__init__ |
3 | 2 | 0 |
attr |
MiMoV2FlashForCausalLM.config |
1 | 0 | 0 |
attr |
MiMoV2FlashForCausalLM.quant_config |
1 | 0 | 0 |
attr |
MiMoV2FlashForCausalLM.model |
1 | 0 | 0 |
attr |
MiMoV2FlashForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
MiMoV2FlashForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
MiMoV2FlashForCausalLM.lm_head |
1 | 0 | 0 |
meth |
MiMoV2Model.__init__ |
3 | 2 | 0 |
attr |
MiMoV2Model.config |
1 | 0 | 0 |
attr |
MiMoV2Model.quant_config |
1 | 0 | 0 |
attr |
MiMoV2Model.vocab_size |
1 | 0 | 0 |
attr |
MiMoV2Model.num_redundant_experts |
1 | 0 | 0 |
attr |
MiMoV2Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
MiMoV2Model.embed_tokens |
1 | 0 | 0 |
attr |
MiMoV2Model.norm |
1 | 0 | 0 |
attr |
MiMoV2FlashDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
MiMoV2FlashDecoderLayer.config |
1 | 0 | 0 |
attr |
MiMoV2FlashDecoderLayer.layer_id |
1 | 0 | 0 |
attr |
MiMoV2FlashDecoderLayer.is_layer_sparse |
1 | 0 | 0 |
attr |
MiMoV2FlashDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
MiMoV2FlashDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
MiMoV2FlashDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
MiMoV2FlashDecoderLayer.mlp |
1 | 0 | 0 |
attr |
MiMoV2Attention.hidden_size |
1 | 0 | 0 |
attr |
MiMoV2Attention.layer_id |
1 | 0 | 0 |
attr |
MiMoV2Attention.total_num_heads |
1 | 0 | 0 |
attr |
MiMoV2Attention.num_heads |
1 | 0 | 0 |
attr |
MiMoV2Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
MiMoV2Attention.num_kv_heads |
1 | 0 | 0 |
attr |
MiMoV2Attention.head_dim |
1 | 0 | 0 |
attr |
MiMoV2Attention.v_head_dim |
1 | 0 | 0 |
attr |
MiMoV2Attention.q_size |
1 | 0 | 0 |
attr |
MiMoV2Attention.k_size |
1 | 0 | 0 |
attr |
MiMoV2Attention.v_size |
1 | 0 | 0 |
attr |
MiMoV2Attention.v_scale |
1 | 0 | 0 |
attr |
MiMoV2Attention.scaling |
1 | 0 | 0 |
attr |
MiMoV2Attention.rope_theta |
1 | 0 | 0 |
attr |
MiMoV2Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
MiMoV2Attention.qkv_proj |
1 | 0 | 0 |
attr |
MiMoV2Attention.o_proj |
1 | 0 | 0 |
attr |
MiMoV2Attention.rotary_emb |
1 | 0 | 0 |
attr |
MiMoV2Attention.attention_sink_bias |
1 | 0 | 0 |
attr |
MiMoV2Attention.attn |
1 | 0 | 0 |
meth |
MiMoV2MLP.forward |
2 | 0 | 0 |
attr |
MiMoV2MLP.gate_up_proj |
1 | 0 | 0 |
attr |
MiMoV2MLP.down_proj |
1 | 0 | 0 |
attr |
MiMoV2MLP.act_fn |
1 | 0 | 0 |
meth |
MiMoV2MoE.__init__ |
4 | 3 | 0 |
attr |
MiMoV2MoE.tp_size |
1 | 0 | 0 |
attr |
MiMoV2MoE.ep_group |
1 | 0 | 0 |
attr |
MiMoV2MoE.ep_rank |
1 | 0 | 0 |
attr |
MiMoV2MoE.ep_size |
1 | 0 | 0 |
attr |
MiMoV2MoE.n_routed_experts |
1 | 0 | 0 |
attr |
MiMoV2MoE.is_sequence_parallel |
1 | 0 | 0 |
attr |
MiMoV2MoE.enable_eplb |
1 | 0 | 0 |
attr |
MiMoV2MoE.n_logical_experts |
1 | 0 | 0 |
attr |
MiMoV2MoE.n_redundant_experts |
1 | 0 | 0 |
attr |
MiMoV2MoE.n_physical_experts |
1 | 0 | 0 |
attr |
MiMoV2MoE.n_local_physical_experts |
1 | 0 | 0 |
attr |
MiMoV2MoE.physical_expert_start |
1 | 0 | 0 |
attr |
MiMoV2MoE.physical_expert_end |
1 | 0 | 0 |
attr |
MiMoV2MoE.gate_dtype |
1 | 0 | 0 |
attr |
MiMoV2MoE.gate |
1 | 0 | 0 |
attr |
MiMoV2MoE.experts |
1 | 0 | 0 |
vllm.model_executor.models.minicpm (61 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniCPMForCausalLM.__init__ |
3 | 2 | 0 |
meth |
MiniCPMForCausalLM._init_model |
3 | 2 | 0 |
attr |
MiniCPMForCausalLM.prefix |
1 | 0 | 0 |
attr |
MiniCPMForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
MiniCPMForCausalLM.config |
1 | 0 | 0 |
attr |
MiniCPMForCausalLM.cache_config |
1 | 0 | 0 |
attr |
MiniCPMForCausalLM.quant_config |
1 | 0 | 0 |
attr |
MiniCPMForCausalLM.model |
1 | 0 | 0 |
attr |
MiniCPMForCausalLM.lm_head |
1 | 0 | 0 |
attr |
MiniCPMForCausalLM.scale_width |
1 | 0 | 0 |
attr |
MiniCPMForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
MiniCPMForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
MiniCPMDecoderLayer._init_attn_block |
1 | 0 | 0 |
meth |
MiniCPMDecoderLayer._init_ffn_block |
1 | 0 | 0 |
attr |
MiniCPMDecoderLayer.config |
1 | 0 | 0 |
attr |
MiniCPMDecoderLayer.cache_config |
1 | 0 | 0 |
attr |
MiniCPMDecoderLayer.quant_config |
1 | 0 | 0 |
attr |
MiniCPMDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
MiniCPMDecoderLayer.max_position_embeddings |
1 | 0 | 0 |
attr |
MiniCPMDecoderLayer.prefix |
1 | 0 | 0 |
meth |
MiniCPMMoE.__init__ |
8 | 7 | 0 |
meth |
MiniCPMMoE.weight_loader |
5 | 4 | 0 |
attr |
MiniCPMMoE.tp_size |
1 | 0 | 0 |
attr |
MiniCPMMoE.num_total_experts |
1 | 0 | 0 |
attr |
MiniCPMMoE.top_k |
1 | 0 | 0 |
attr |
MiniCPMMoE.hidden_size |
1 | 0 | 0 |
attr |
MiniCPMMoE.intermediate_size |
1 | 0 | 0 |
attr |
MiniCPMMoE.params_dtype |
1 | 0 | 0 |
attr |
MiniCPMMoE.gate |
1 | 0 | 0 |
attr |
MiniCPMMoE.ws |
1 | 0 | 0 |
attr |
MiniCPMMoE.w2s |
1 | 0 | 0 |
attr |
MiniCPMAttention.hidden_size |
1 | 0 | 0 |
attr |
MiniCPMAttention.total_num_heads |
1 | 0 | 0 |
attr |
MiniCPMAttention.num_heads |
1 | 0 | 0 |
attr |
MiniCPMAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
MiniCPMAttention.num_kv_heads |
1 | 0 | 0 |
attr |
MiniCPMAttention.head_dim |
1 | 0 | 0 |
attr |
MiniCPMAttention.q_size |
1 | 0 | 0 |
attr |
MiniCPMAttention.kv_size |
1 | 0 | 0 |
attr |
MiniCPMAttention.scaling |
1 | 0 | 0 |
attr |
MiniCPMAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
MiniCPMAttention.qkv_proj |
1 | 0 | 0 |
attr |
MiniCPMAttention.o_proj |
1 | 0 | 0 |
attr |
MiniCPMAttention.rotary_emb |
1 | 0 | 0 |
attr |
MiniCPMAttention.attn |
1 | 0 | 0 |
meth |
MiniCPMModel.__init__ |
3 | 2 | 0 |
meth |
MiniCPMModel._init_layers |
5 | 4 | 0 |
attr |
MiniCPMModel.config |
1 | 0 | 0 |
attr |
MiniCPMModel.cache_config |
1 | 0 | 0 |
attr |
MiniCPMModel.quant_config |
1 | 0 | 0 |
attr |
MiniCPMModel.vocab_size |
1 | 0 | 0 |
attr |
MiniCPMModel.embed_tokens |
1 | 0 | 0 |
attr |
MiniCPMModel.num_experts |
1 | 0 | 0 |
attr |
MiniCPMModel.norm |
1 | 0 | 0 |
attr |
MiniCPMModel.aux_hidden_state_layers |
1 | 0 | 0 |
attr |
MiniCPMModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
MiniCPMMLP.forward |
2 | 0 | 0 |
attr |
MiniCPMMLP.gate_up_proj |
1 | 0 | 0 |
attr |
MiniCPMMLP.down_proj |
1 | 0 | 0 |
attr |
MiniCPMMLP.act_fn |
1 | 0 | 0 |
vllm.model_executor.models.minicpm3 (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniCPM3Model._init_layers |
5 | 4 | 0 |
attr |
MiniCPM3Attention.hidden_size |
1 | 0 | 0 |
attr |
MiniCPM3Attention.qk_nope_head_dim |
1 | 0 | 0 |
attr |
MiniCPM3Attention.qk_rope_head_dim |
1 | 0 | 0 |
attr |
MiniCPM3Attention.qk_head_dim |
1 | 0 | 0 |
attr |
MiniCPM3Attention.v_head_dim |
1 | 0 | 0 |
attr |
MiniCPM3Attention.q_lora_rank |
1 | 0 | 0 |
attr |
MiniCPM3Attention.kv_lora_rank |
1 | 0 | 0 |
attr |
MiniCPM3Attention.num_heads |
1 | 0 | 0 |
attr |
MiniCPM3Attention.num_local_heads |
1 | 0 | 0 |
attr |
MiniCPM3Attention.scaling |
1 | 0 | 0 |
attr |
MiniCPM3Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
MiniCPM3Attention.q_a_proj |
1 | 0 | 0 |
attr |
MiniCPM3Attention.q_a_layernorm |
1 | 0 | 0 |
attr |
MiniCPM3Attention.q_b_proj |
1 | 0 | 0 |
attr |
MiniCPM3Attention.kv_a_proj_with_mqa |
1 | 0 | 0 |
attr |
MiniCPM3Attention.kv_a_layernorm |
1 | 0 | 0 |
attr |
MiniCPM3Attention.kv_b_proj |
1 | 0 | 0 |
attr |
MiniCPM3Attention.o_proj |
1 | 0 | 0 |
attr |
MiniCPM3Attention.rotary_emb |
1 | 0 | 0 |
attr |
MiniCPM3Attention.attn |
1 | 0 | 0 |
meth |
MiniCPM3ForCausalLM._init_model |
3 | 2 | 0 |
meth |
MiniCPM3DecoderLayer._init_attn_block |
1 | 0 | 0 |
vllm.model_executor.models.minicpm_eagle (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EagleMiniCPMDecoderLayer._init_attn_block |
1 | 0 | 0 |
meth |
EagleMiniCPMDecoderLayer._init_ffn_block |
1 | 0 | 0 |
attr |
EagleMiniCPMDecoderLayer.config |
1 | 0 | 0 |
attr |
EagleMiniCPMDecoderLayer.cache_config |
1 | 0 | 0 |
attr |
EagleMiniCPMDecoderLayer.quant_config |
1 | 0 | 0 |
attr |
EagleMiniCPMDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
EagleMiniCPMDecoderLayer.max_position_embeddings |
1 | 0 | 0 |
attr |
EagleMiniCPMDecoderLayer.prefix |
1 | 0 | 0 |
meth |
EagleMiniCPMForCausalLM.__init__ |
3 | 2 | 0 |
meth |
EagleMiniCPMForCausalLM._init_model |
4 | 3 | 0 |
attr |
EagleMiniCPMForCausalLM.prefix |
1 | 0 | 0 |
attr |
EagleMiniCPMForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
EagleMiniCPMForCausalLM.config |
1 | 0 | 0 |
attr |
EagleMiniCPMForCausalLM.cache_config |
1 | 0 | 0 |
attr |
EagleMiniCPMForCausalLM.quant_config |
1 | 0 | 0 |
attr |
EagleMiniCPMForCausalLM.model |
1 | 0 | 0 |
attr |
EagleMiniCPMForCausalLM.lm_head |
1 | 0 | 0 |
attr |
EagleMiniCPMForCausalLM.scale_width |
1 | 0 | 0 |
attr |
EagleMiniCPMForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
EagleMiniCPMForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
EagleMiniCPMModel.__init__ |
4 | 3 | 0 |
meth |
EagleMiniCPMModel._init_layers |
6 | 5 | 0 |
attr |
EagleMiniCPMModel.config |
1 | 0 | 0 |
attr |
EagleMiniCPMModel.cache_config |
1 | 0 | 0 |
attr |
EagleMiniCPMModel.quant_config |
1 | 0 | 0 |
attr |
EagleMiniCPMModel.vocab_size |
1 | 0 | 0 |
attr |
EagleMiniCPMModel.fc |
1 | 0 | 0 |
attr |
EagleMiniCPMModel.input_norm1 |
1 | 0 | 0 |
attr |
EagleMiniCPMModel.input_norm2 |
1 | 0 | 0 |
attr |
EagleMiniCPMModel.embed_tokens |
1 | 0 | 0 |
attr |
EagleMiniCPMModel.num_experts |
1 | 0 | 0 |
attr |
EagleMiniCPMModel.norm |
1 | 0 | 0 |
attr |
EagleMiniCPMModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.minicpmo (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniCPMOProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
MiniCPMOBaseModel.__init__ |
3 | 2 | 0 |
meth |
MiniCPMOBaseModel.init_audio_module |
3 | 2 | 0 |
meth |
MiniCPMOBaseModel._get_feat_extract_output_lengths |
2 | 1 | 0 |
meth |
MiniCPMOBaseModel._process_multimodal_inputs |
2 | 1 | 0 |
attr |
MiniCPMOBaseModel.apm |
1 | 0 | 0 |
meth |
MiniCPMWhisperEncoderLayer.__init__ |
3 | 2 | 0 |
attr |
MiniCPMWhisperEncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
MiniCPMWhisperEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
MiniCPMWhisperEncoderLayer.self_attn_layer_norm |
1 | 0 | 0 |
attr |
MiniCPMWhisperEncoderLayer.dropout |
1 | 0 | 0 |
attr |
MiniCPMWhisperEncoderLayer.activation_fn |
1 | 0 | 0 |
attr |
MiniCPMWhisperEncoderLayer.activation_dropout |
1 | 0 | 0 |
attr |
MiniCPMWhisperEncoderLayer.fc1 |
1 | 0 | 0 |
attr |
MiniCPMWhisperEncoderLayer.fc2 |
1 | 0 | 0 |
attr |
MiniCPMWhisperEncoderLayer.final_layer_norm |
1 | 0 | 0 |
meth |
MultiModalProjector.__init__ |
3 | 2 | 0 |
attr |
MultiModalProjector.linear1 |
1 | 0 | 0 |
attr |
MultiModalProjector.relu |
1 | 0 | 0 |
attr |
MultiModalProjector.linear2 |
1 | 0 | 0 |
meth |
MiniCPMO4_5.__init__ |
3 | 2 | 0 |
attr |
MiniCPMO4_5.apm |
1 | 0 | 0 |
meth |
MiniCPMWhisperEncoder.__init__ |
2 | 1 | 0 |
attr |
MiniCPMWhisperEncoder.layers |
1 | 0 | 0 |
attr |
CPU_DEVICE |
1 | 0 | 0 |
meth |
MiniCPMO2_6.__init__ |
3 | 2 | 0 |
attr |
MiniCPMO2_6.apm |
1 | 0 | 0 |
meth |
MiniCPMO.__new__ |
3 | 2 | 0 |
meth |
MiniCPMO.__init__ |
3 | 2 | 0 |
vllm.model_executor.models.minicpmv (30 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniCPMV2_0.__init__ |
3 | 2 | 0 |
meth |
MiniCPMVProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
MiniCPMVProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
MiniCPMVProcessingInfo.get_image_processor |
2 | 1 | 0 |
meth |
MiniCPMVProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
MiniCPMVProcessingInfo.get_model_version |
1 | 0 | 0 |
meth |
MiniCPMVBaseModel.__init__ |
3 | 2 | 0 |
meth |
MiniCPMVBaseModel._process_multimodal_inputs |
2 | 1 | 0 |
meth |
MiniCPMVBaseModel.forward |
6 | 6 | 1 |
attr |
MiniCPMVBaseModel.use_data_parallel |
1 | 0 | 0 |
attr |
MiniCPMVBaseModel.config |
1 | 0 | 0 |
attr |
MiniCPMVBaseModel.multimodal_config |
1 | 0 | 0 |
attr |
MiniCPMVBaseModel.version |
1 | 0 | 0 |
attr |
MiniCPMVBaseModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
MiniCPMVBaseModel.llm |
1 | 0 | 0 |
attr |
MiniCPMVBaseModel.vpm |
1 | 0 | 0 |
attr |
MiniCPMVBaseModel.vision_dim |
1 | 0 | 0 |
attr |
MiniCPMVBaseModel.embed_dim |
1 | 0 | 0 |
attr |
MiniCPMVBaseModel.resampler |
1 | 0 | 0 |
meth |
MiniCPMV4_0.__init__ |
3 | 2 | 0 |
meth |
MiniCPMV.__new__ |
3 | 2 | 0 |
meth |
MiniCPMV2_6.__init__ |
3 | 2 | 0 |
meth |
MiniCPMV2_5.__init__ |
3 | 2 | 0 |
meth |
Resampler4_5.get_1d_sincos_pos_embed_from_temporal_size |
3 | 2 | 0 |
meth |
Resampler4_5._adjust_temporal_pos_cache |
3 | 2 | 0 |
meth |
Resampler4_5._init_weights |
2 | 1 | 0 |
meth |
Resampler4_5.forward |
4 | 3 | 0 |
attr |
Resampler4_5.max_temporal_size |
1 | 0 | 0 |
attr |
Resampler2_5.max_size |
1 | 0 | 0 |
attr |
DEFAULT_LN |
1 | 0 | 0 |
meth |
MiniCPMV4_5.__init__ |
3 | 2 | 0 |
vllm.model_executor.models.minimax_m2 (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniMaxM2Model.__init__ |
3 | 2 | 0 |
attr |
MiniMaxM2Model.config |
1 | 0 | 0 |
attr |
MiniMaxM2Model.vocab_size |
1 | 0 | 0 |
attr |
MiniMaxM2Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
MiniMaxM2Model.embed_tokens |
1 | 0 | 0 |
attr |
MiniMaxM2Model.norm |
1 | 0 | 0 |
meth |
MiniMaxM2ForCausalLM.__init__ |
3 | 2 | 0 |
meth |
MiniMaxM2ForCausalLM.forward |
6 | 5 | 0 |
attr |
MiniMaxM2ForCausalLM.config |
1 | 0 | 0 |
attr |
MiniMaxM2ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
MiniMaxM2ForCausalLM.model |
1 | 0 | 0 |
attr |
MiniMaxM2ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
MiniMaxM2ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
MiniMaxM2ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
MiniMaxM2MoE.__init__ |
4 | 3 | 0 |
attr |
MiniMaxM2MoE.tp_size |
1 | 0 | 0 |
attr |
MiniMaxM2MoE.use_routing_bias |
1 | 0 | 0 |
attr |
MiniMaxM2MoE.experts |
1 | 0 | 0 |
attr |
MiniMaxM2MoE.gate |
1 | 0 | 0 |
attr |
MiniMaxM2MoE.e_score_correction_bias |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.hidden_size |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.total_num_heads |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.num_heads |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.num_kv_heads |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.head_dim |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.q_size |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.kv_size |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.scaling |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.qkv_proj |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.o_proj |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.rotary_emb |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.attn |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.q_norm |
1 | 0 | 0 |
attr |
MiniMaxM2Attention.k_norm |
1 | 0 | 0 |
attr |
MiniMaxM2DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
MiniMaxM2DecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
MiniMaxM2DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
MiniMaxM2DecoderLayer.block_sparse_moe |
1 | 0 | 0 |
attr |
MiniMaxM2DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
MiniMaxM2DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.minimax_text_01 (73 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
weight_loader_with_alias |
2 | 1 | 0 |
meth |
MiniMaxText01DecoderLayer.forward |
7 | 6 | 0 |
attr |
MiniMaxText01DecoderLayer.prefix |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.expert_num |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.layernorm_mlp_alpha |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.layernorm_mlp_beta |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.postnorm |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.shared_moe |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.mlp |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.layernorm_attention_alpha |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.layernorm_attention_beta |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.shared_mlp |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.coefficient |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.shared_moe_mode |
1 | 0 | 0 |
attr |
MiniMaxText01DecoderLayer.block_sparse_moe |
1 | 0 | 0 |
meth |
MiniMaxText01Attention.forward |
5 | 4 | 0 |
attr |
MiniMaxText01Attention.layer_idx |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.hidden_size |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.total_num_heads |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.num_heads |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.num_kv_heads |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.head_dim |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.q_size |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.kv_size |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.scaling |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.sliding_window |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.prefix |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.qkv_proj |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.o_proj |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.attn |
1 | 0 | 0 |
attr |
MiniMaxText01Attention.rotary_emb |
1 | 0 | 0 |
attr |
MiniMaxText01MLP.layer_idx |
1 | 0 | 0 |
attr |
MiniMaxText01MLP.gate_up_proj |
1 | 0 | 0 |
attr |
MiniMaxText01MLP.down_proj |
1 | 0 | 0 |
attr |
MiniMaxText01MLP.act_fn |
1 | 0 | 0 |
meth |
MiniMaxText01ForCausalLM.copy_inputs_before_cuda_graphs |
3 | 0 | 0 |
meth |
MiniMaxText01ForCausalLM.get_seqlen_agnostic_capture_inputs |
2 | 1 | 0 |
meth |
MiniMaxText01ForCausalLM.forward |
6 | 5 | 0 |
attr |
MiniMaxText01ForCausalLM.config |
1 | 0 | 0 |
attr |
MiniMaxText01ForCausalLM.CONCAT_FFN |
1 | 0 | 0 |
attr |
MiniMaxText01ForCausalLM.model |
1 | 0 | 0 |
attr |
MiniMaxText01ForCausalLM.kv_cache |
1 | 0 | 0 |
attr |
MiniMaxText01ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
MiniMaxText01ForCausalLM.logits_processor |
1 | 0 | 0 |
meth |
MiniMaxText01Model.init |
3 | 2 | 0 |
meth |
MiniMaxText01Model._clear_prefill_cache |
4 | 1 | 0 |
meth |
MiniMaxText01Model.forward |
6 | 5 | 0 |
attr |
MiniMaxText01Model.vocab_size |
1 | 0 | 0 |
attr |
MiniMaxText01Model.decoder_attention_types |
1 | 0 | 0 |
attr |
MiniMaxText01Model.num_layers |
1 | 0 | 0 |
attr |
MiniMaxText01Model.cache_shape |
1 | 0 | 0 |
attr |
MiniMaxText01Model.embed_scale |
1 | 0 | 0 |
attr |
MiniMaxText01Model.embed_tokens |
1 | 0 | 0 |
attr |
MiniMaxText01Model.norm |
1 | 0 | 0 |
attr |
MiniMaxText01MoE.layer_idx |
1 | 0 | 0 |
attr |
MiniMaxText01MoE.tp_size |
1 | 0 | 0 |
attr |
MiniMaxText01MoE.num_total_experts |
1 | 0 | 0 |
attr |
MiniMaxText01MoE.top_k |
1 | 0 | 0 |
attr |
MiniMaxText01MoE.hidden_size |
1 | 0 | 0 |
attr |
MiniMaxText01MoE.intermediate_size |
1 | 0 | 0 |
attr |
MiniMaxText01MoE.quant_config |
1 | 0 | 0 |
attr |
MiniMaxText01MoE.params_dtype |
1 | 0 | 0 |
attr |
MiniMaxText01MoE.gate |
1 | 0 | 0 |
attr |
MiniMaxText01MoE.experts |
1 | 0 | 0 |
vllm.model_executor.models.minimax_vl_01 (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniMaxVL01MultiModalProjector.init |
7 | 6 | 0 |
attr |
MiniMaxVL01MultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
MiniMaxVL01MultiModalProjector.act |
1 | 0 | 0 |
attr |
MiniMaxVL01MultiModalProjector.linear_2 |
1 | 0 | 0 |
meth |
MiniMaxVL01ProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
MiniMaxVL01ProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
MiniMaxVL01ForConditionalGeneration.pack_image_features |
3 | 2 | 0 |
attr |
MiniMaxVL01ForConditionalGeneration.config |
1 | 0 | 0 |
attr |
MiniMaxVL01ForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
MiniMaxVL01ForConditionalGeneration.vision_feature_layer |
1 | 0 | 0 |
attr |
MiniMaxVL01ForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
MiniMaxVL01ForConditionalGeneration.pad_token_id |
1 | 0 | 0 |
attr |
MiniMaxVL01ForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
MiniMaxVL01ForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
MiniMaxVL01ForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
MiniMaxVL01ForConditionalGeneration.image_newline |
1 | 0 | 0 |
attr |
MiniMaxVL01ForConditionalGeneration.language_model |
1 | 0 | 0 |
vllm.model_executor.models.mistral (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MistralDecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
MistralDecoderLayer.ada_rms_norm_t_cond |
1 | 0 | 0 |
meth |
MistralModel.init |
4 | 3 | 0 |
attr |
MistralAttention.do_llama_4_scaling |
1 | 0 | 0 |
attr |
MistralAttention.llama_4_scaling_original_max_position_embeddings |
1 | 0 | 0 |
attr |
MistralAttention.llama_4_scaling_beta |
1 | 0 | 0 |
meth |
MistralMLP.forward |
2 | 0 | 0 |
attr |
MistralMLP.gate_up_proj |
1 | 0 | 0 |
attr |
MistralMLP.down_proj |
1 | 0 | 0 |
attr |
MistralMLP.act_fn |
1 | 0 | 0 |
meth |
MistralForCausalLM.init |
4 | 3 | 0 |
meth |
MistralForCausalLM._init_model |
4 | 3 | 0 |
vllm.model_executor.models.mistral3 (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Mistral3ForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Mistral3ForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Mistral3ForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Mistral3ForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Mistral3ForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
Mistral3ForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
Mistral3ForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
Mistral3MultiModalProjector.init |
9 | 8 | 0 |
attr |
Mistral3MultiModalProjector.norm |
1 | 0 | 0 |
attr |
Mistral3MultiModalProjector.patch_merger |
1 | 0 | 0 |
attr |
Mistral3MultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
Mistral3MultiModalProjector.act |
1 | 0 | 0 |
attr |
Mistral3MultiModalProjector.linear_2 |
1 | 0 | 0 |
meth |
BaseLlavaProcessingInfo.get_vision_encoder_info |
1 | 0 | 0 |
meth |
Mistral3ProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
Mistral3PatchMerger.init |
4 | 3 | 0 |
attr |
Mistral3PatchMerger.vision_hidden_size |
1 | 0 | 0 |
attr |
Mistral3PatchMerger.spatial_merge_size |
1 | 0 | 0 |
attr |
Mistral3PatchMerger.patch_size |
1 | 0 | 0 |
attr |
Mistral3PatchMerger.merging_layer |
1 | 0 | 0 |
vllm.model_executor.models.mistral_large_3_eagle (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EagleMistralLarge3ForCausalLM.init |
3 | 2 | 0 |
attr |
EagleMistralLarge3ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
EagleMistralLarge3ForCausalLM.model_cls |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
EagleMistralLarge3Model.init |
4 | 3 | 0 |
attr |
EagleMistralLarge3Model.config |
1 | 0 | 0 |
attr |
EagleMistralLarge3Model.vllm_config |
1 | 0 | 0 |
attr |
EagleMistralLarge3Model.vocab_size |
1 | 0 | 0 |
attr |
EagleMistralLarge3Model.embed_tokens |
1 | 0 | 0 |
attr |
EagleMistralLarge3Model.layers |
1 | 0 | 0 |
attr |
EagleMistralLarge3Model.start_layer |
1 | 0 | 0 |
attr |
EagleMistralLarge3Model.end_layer |
1 | 0 | 0 |
attr |
EagleMistralLarge3Model.fc |
1 | 0 | 0 |
attr |
EagleMistralLarge3Model.norm |
1 | 0 | 0 |
attr |
EagleMistralLarge3Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.mixtral (60 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MixtralAttention.hidden_size |
1 | 0 | 0 |
attr |
MixtralAttention.total_num_heads |
1 | 0 | 0 |
attr |
MixtralAttention.num_heads |
1 | 0 | 0 |
attr |
MixtralAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
MixtralAttention.num_kv_heads |
1 | 0 | 0 |
attr |
MixtralAttention.head_dim |
1 | 0 | 0 |
attr |
MixtralAttention.q_size |
1 | 0 | 0 |
attr |
MixtralAttention.kv_size |
1 | 0 | 0 |
attr |
MixtralAttention.scaling |
1 | 0 | 0 |
attr |
MixtralAttention.qkv_proj |
1 | 0 | 0 |
attr |
MixtralAttention.o_proj |
1 | 0 | 0 |
attr |
MixtralAttention.rotary_emb |
1 | 0 | 0 |
attr |
MixtralAttention.attn |
1 | 0 | 0 |
meth |
MixtralModel.init |
3 | 2 | 0 |
attr |
MixtralModel.config |
1 | 0 | 0 |
attr |
MixtralModel.quant_config |
1 | 0 | 0 |
attr |
MixtralModel.vocab_size |
1 | 0 | 0 |
attr |
MixtralModel.org_vocab_size |
1 | 0 | 0 |
attr |
MixtralModel.embed_tokens |
1 | 0 | 0 |
attr |
MixtralModel.enable_eplb |
1 | 0 | 0 |
attr |
MixtralModel.num_redundant_experts |
1 | 0 | 0 |
attr |
MixtralModel.norm |
1 | 0 | 0 |
attr |
MixtralModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
MixtralForCausalLM.init |
3 | 2 | 0 |
attr |
MixtralForCausalLM.config |
1 | 0 | 0 |
attr |
MixtralForCausalLM.quant_config |
1 | 0 | 0 |
attr |
MixtralForCausalLM.model |
1 | 0 | 0 |
attr |
MixtralForCausalLM.lm_head |
1 | 0 | 0 |
attr |
MixtralForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
MixtralForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
MixtralForCausalLM.expert_weights |
1 | 0 | 0 |
attr |
MixtralForCausalLM.moe_layers |
1 | 0 | 0 |
attr |
MixtralForCausalLM.num_moe_layers |
1 | 0 | 0 |
attr |
MixtralForCausalLM.num_logical_experts |
1 | 0 | 0 |
attr |
MixtralForCausalLM.num_physical_experts |
1 | 0 | 0 |
attr |
MixtralForCausalLM.num_local_physical_experts |
1 | 0 | 0 |
attr |
MixtralForCausalLM.num_routed_experts |
1 | 0 | 0 |
attr |
MixtralForCausalLM.num_redundant_experts |
1 | 0 | 0 |
attr |
MixtralForCausalLM.num_expert_groups |
1 | 0 | 0 |
attr |
MixtralForCausalLM.num_shared_experts |
1 | 0 | 0 |
attr |
MixtralDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
MixtralDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
MixtralDecoderLayer.block_sparse_moe |
1 | 0 | 0 |
attr |
MixtralDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
MixtralDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
MixtralMoE.init |
11 | 10 | 0 |
attr |
MixtralMoE.hidden_size |
1 | 0 | 0 |
attr |
MixtralMoE.ep_group |
1 | 0 | 0 |
attr |
MixtralMoE.ep_rank |
1 | 0 | 0 |
attr |
MixtralMoE.ep_size |
1 | 0 | 0 |
attr |
MixtralMoE.enable_eplb |
1 | 0 | 0 |
attr |
MixtralMoE.n_routed_experts |
1 | 0 | 0 |
attr |
MixtralMoE.n_logical_experts |
1 | 0 | 0 |
attr |
MixtralMoE.n_redundant_experts |
1 | 0 | 0 |
attr |
MixtralMoE.n_physical_experts |
1 | 0 | 0 |
attr |
MixtralMoE.n_local_physical_experts |
1 | 0 | 0 |
attr |
MixtralMoE.physical_expert_start |
1 | 0 | 0 |
attr |
MixtralMoE.physical_expert_end |
1 | 0 | 0 |
attr |
MixtralMoE.gate |
1 | 0 | 0 |
attr |
MixtralMoE.experts |
1 | 0 | 0 |
vllm.model_executor.models.mllama4 (86 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Llama4VisionMLP.init |
8 | 7 | 0 |
attr |
Llama4VisionMLP.fc1 |
1 | 0 | 0 |
attr |
Llama4VisionMLP.fc2 |
1 | 0 | 0 |
attr |
Llama4VisionMLP.activation_fn |
1 | 0 | 0 |
attr |
Llama4VisionMLP.output_activation |
1 | 0 | 0 |
meth |
Llama4VisionEncoderLayer.init |
4 | 3 | 0 |
meth |
Llama4VisionEncoderLayer.forward |
2 | 1 | 0 |
attr |
Llama4VisionEncoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Llama4VisionEncoderLayer.num_attention_heads |
1 | 0 | 0 |
attr |
Llama4VisionEncoderLayer.intermediate_size |
1 | 0 | 0 |
attr |
Llama4VisionEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
Llama4VisionEncoderLayer.mlp |
1 | 0 | 0 |
attr |
Llama4VisionEncoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Llama4VisionEncoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
Llama4MultiModalProjector.init |
4 | 2 | 0 |
meth |
Llama4MultiModalProjector.forward |
2 | 0 | 0 |
attr |
Llama4MultiModalProjector.linear_1 |
1 | 0 | 0 |
meth |
Llama4VisionPixelShuffleMLP.init |
4 | 2 | 0 |
attr |
Llama4VisionPixelShuffleMLP.pixel_shuffle_ratio |
1 | 0 | 0 |
attr |
Llama4VisionPixelShuffleMLP.inner_dim |
1 | 0 | 0 |
attr |
Llama4VisionPixelShuffleMLP.output_dim |
1 | 0 | 0 |
attr |
Llama4VisionPixelShuffleMLP.mlp |
1 | 0 | 0 |
meth |
Llama4UnfoldConvolution.init |
4 | 3 | 0 |
attr |
Llama4UnfoldConvolution.unfold |
1 | 0 | 0 |
attr |
Llama4UnfoldConvolution.linear |
1 | 0 | 0 |
meth |
Llama4VisionAttention.init |
4 | 3 | 0 |
attr |
Llama4VisionAttention.config |
1 | 0 | 0 |
attr |
Llama4VisionAttention.tp_size |
1 | 0 | 0 |
attr |
Llama4VisionAttention.embed_dim |
1 | 0 | 0 |
attr |
Llama4VisionAttention.num_heads |
1 | 0 | 0 |
attr |
Llama4VisionAttention.head_dim |
1 | 0 | 0 |
attr |
Llama4VisionAttention.num_local_heads |
1 | 0 | 0 |
attr |
Llama4VisionAttention.q_size |
1 | 0 | 0 |
attr |
Llama4VisionAttention.kv_size |
1 | 0 | 0 |
attr |
Llama4VisionAttention.attention_dropout |
1 | 0 | 0 |
attr |
Llama4VisionAttention.scaling |
1 | 0 | 0 |
attr |
Llama4VisionAttention.attn |
1 | 0 | 0 |
attr |
Llama4VisionAttention.rotary_emb |
1 | 0 | 0 |
attr |
Llama4VisionAttention.qkv_proj |
1 | 0 | 0 |
attr |
Llama4VisionAttention.o_proj |
1 | 0 | 0 |
meth |
Llama4VisionEncoder.init |
4 | 3 | 0 |
attr |
Llama4VisionEncoder.config |
1 | 0 | 0 |
attr |
Llama4VisionEncoder.layers |
1 | 0 | 0 |
meth |
Llama4VisionModel.init |
4 | 3 | 0 |
attr |
Llama4VisionModel.config |
1 | 0 | 0 |
attr |
Llama4VisionModel.image_size |
1 | 0 | 0 |
attr |
Llama4VisionModel.patch_size |
1 | 0 | 0 |
attr |
Llama4VisionModel.hidden_size |
1 | 0 | 0 |
attr |
Llama4VisionModel.num_channels |
1 | 0 | 0 |
attr |
Llama4VisionModel.num_patches |
1 | 0 | 0 |
attr |
Llama4VisionModel.scale |
1 | 0 | 0 |
attr |
Llama4VisionModel.patch_embedding |
1 | 0 | 0 |
attr |
Llama4VisionModel.class_embedding |
1 | 0 | 0 |
attr |
Llama4VisionModel.positional_embedding_vlm |
1 | 0 | 0 |
attr |
Llama4VisionModel.layernorm_pre |
1 | 0 | 0 |
attr |
Llama4VisionModel.layernorm_post |
1 | 0 | 0 |
attr |
Llama4VisionModel.model |
1 | 0 | 0 |
attr |
Llama4VisionModel.vision_adapter |
1 | 0 | 0 |
meth |
Llama4ForConditionalGeneration.init |
3 | 2 | 0 |
meth |
Llama4ForConditionalGeneration.set_eplb_state |
4 | 3 | 0 |
meth |
Llama4ForConditionalGeneration.update_physical_experts_metadata |
3 | 2 | 0 |
meth |
Llama4ForConditionalGeneration.embed_multimodal |
2 | 1 | 0 |
attr |
Llama4ForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.vllm_config |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.num_expert_groups |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.num_logical_experts |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.num_physical_experts |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.num_local_physical_experts |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.num_routed_experts |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.num_shared_experts |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.num_redundant_experts |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.moe_layers |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.num_moe_layers |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.vision_model |
1 | 0 | 0 |
func |
pixel_shuffle |
3 | 0 | 0 |
vllm.model_executor.models.mlp_speculator (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MLPSpeculatorLayerNorm.init |
4 | 0 | 0 |
meth |
MLPSpeculatorLayerNorm.forward |
2 | 0 | 0 |
attr |
MLPSpeculatorLayerNorm.elementwise_scale_and_shift |
1 | 0 | 0 |
attr |
MLPSpeculatorLayerNorm.eps |
1 | 0 | 0 |
attr |
MLPSpeculatorLayerNorm.weight |
1 | 0 | 0 |
attr |
MLPSpeculatorLayerNorm.bias |
1 | 0 | 0 |
attr |
MLPSpeculator.n_predict |
1 | 0 | 0 |
attr |
MLPSpeculator.vocab_size |
1 | 0 | 0 |
attr |
MLPSpeculator.emb_dim |
1 | 0 | 0 |
attr |
MLPSpeculator.inner_dim |
1 | 0 | 0 |
attr |
MLPSpeculator.max_speculative_tokens |
1 | 0 | 0 |
attr |
MLPSpeculator.tie_weights |
1 | 0 | 0 |
attr |
MLPSpeculator.scale_input |
1 | 0 | 0 |
attr |
MLPSpeculator.state_weight |
1 | 0 | 0 |
attr |
MLPSpeculator.emb_weight |
1 | 0 | 0 |
attr |
MLPSpeculator.activation |
1 | 0 | 0 |
attr |
MLPSpeculator.config |
1 | 0 | 0 |
attr |
MLPSpeculator.logits_processor |
1 | 0 | 0 |
attr |
MLPSpeculator.emb |
1 | 0 | 0 |
attr |
MLPSpeculator.proj |
1 | 0 | 0 |
attr |
MLPSpeculator.head |
1 | 0 | 0 |
attr |
MLPSpeculator.ln |
1 | 0 | 0 |
attr |
MLPSpeculator.ln0 |
1 | 0 | 0 |
vllm.model_executor.models.modernbert (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ModernBertForTokenClassification.init |
3 | 2 | 0 |
meth |
ModernBertForTokenClassification.load_weights |
2 | 1 | 0 |
attr |
ModernBertForTokenClassification.head_dtype |
1 | 0 | 0 |
attr |
ModernBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
ModernBertForTokenClassification.model |
1 | 0 | 0 |
attr |
ModernBertForTokenClassification.head |
1 | 0 | 0 |
attr |
ModernBertForTokenClassification.classifier |
1 | 0 | 0 |
attr |
ModernBertForTokenClassification.pooler |
1 | 0 | 0 |
meth |
ModernBertModel.init |
3 | 2 | 0 |
attr |
ModernBertModel.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
ModernBertModel.config |
1 | 0 | 0 |
attr |
ModernBertModel.embeddings |
1 | 0 | 0 |
attr |
ModernBertModel.encoder_layer |
1 | 0 | 0 |
attr |
ModernBertModel.final_norm |
1 | 0 | 0 |
meth |
ModernBertForSequenceClassification.init |
3 | 2 | 0 |
meth |
ModernBertForSequenceClassification.load_weights |
2 | 1 | 0 |
attr |
ModernBertForSequenceClassification.config |
1 | 0 | 0 |
attr |
ModernBertForSequenceClassification.model |
1 | 0 | 0 |
attr |
ModernBertForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
ModernBertForSequenceClassification.pooling |
1 | 0 | 0 |
attr |
ModernBertForSequenceClassification.pooler |
1 | 0 | 0 |
meth |
ModernBertEncoderLayer.init |
3 | 2 | 0 |
attr |
ModernBertEncoderLayer.layers |
1 | 0 | 0 |
meth |
ModernBertPooler.init |
2 | 1 | 0 |
attr |
ModernBertPooler.dense |
1 | 0 | 0 |
attr |
ModernBertPooler.act |
1 | 0 | 0 |
attr |
ModernBertPooler.norm |
1 | 0 | 0 |
attr |
ModernBertPooler.head |
1 | 0 | 0 |
meth |
ModernBertMLP.init |
3 | 2 | 0 |
attr |
ModernBertMLP.config |
1 | 0 | 0 |
attr |
ModernBertMLP.Wi |
1 | 0 | 0 |
attr |
ModernBertMLP.act |
1 | 0 | 0 |
attr |
ModernBertMLP.Wo |
1 | 0 | 0 |
meth |
ModernBertPredictionHead.init |
2 | 0 | 0 |
attr |
ModernBertPredictionHead.config |
1 | 0 | 0 |
attr |
ModernBertPredictionHead.dense |
1 | 0 | 0 |
attr |
ModernBertPredictionHead.act |
1 | 0 | 0 |
attr |
ModernBertPredictionHead.norm |
1 | 0 | 0 |
meth |
ModernBertEmbeddings.init |
2 | 1 | 0 |
attr |
ModernBertEmbeddings.config |
1 | 0 | 0 |
attr |
ModernBertEmbeddings.tok_embeddings |
1 | 0 | 0 |
attr |
ModernBertEmbeddings.norm |
1 | 0 | 0 |
meth |
ModernBertLayer.init |
4 | 3 | 0 |
attr |
ModernBertLayer.config |
1 | 0 | 0 |
attr |
ModernBertLayer.attn |
1 | 0 | 0 |
attr |
ModernBertLayer.mlp_norm |
1 | 0 | 0 |
attr |
ModernBertLayer.mlp |
1 | 0 | 0 |
attr |
ModernBertLayer.attn_norm |
1 | 0 | 0 |
meth |
ModernBertAttention.init |
4 | 3 | 0 |
attr |
ModernBertAttention.config |
1 | 0 | 0 |
attr |
ModernBertAttention.hidden_size |
1 | 0 | 0 |
attr |
ModernBertAttention.layer_id |
1 | 0 | 0 |
attr |
ModernBertAttention.deterministic_flash_attn |
1 | 0 | 0 |
attr |
ModernBertAttention.num_heads |
1 | 0 | 0 |
attr |
ModernBertAttention.head_dim |
1 | 0 | 0 |
attr |
ModernBertAttention.all_head_size |
1 | 0 | 0 |
attr |
ModernBertAttention.scaling |
1 | 0 | 0 |
attr |
ModernBertAttention.Wqkv |
1 | 0 | 0 |
attr |
ModernBertAttention.rotary_emb |
1 | 0 | 0 |
attr |
ModernBertAttention.attn |
1 | 0 | 0 |
attr |
ModernBertAttention.Wo |
1 | 0 | 0 |
vllm.model_executor.models.module_mapping (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MultiModelKeys.from_string_field |
6 | 5 | 0 |
vllm.model_executor.models.molmo (91 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MolmoModel.init |
3 | 2 | 0 |
attr |
MolmoModel.config |
1 | 0 | 0 |
attr |
MolmoModel.embedding_size |
1 | 0 | 0 |
attr |
MolmoModel.embed_tokens |
1 | 0 | 0 |
attr |
MolmoModel.norm |
1 | 0 | 0 |
attr |
MolmoModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
MultiHeadDotProductAttention.init |
6 | 5 | 0 |
attr |
MultiHeadDotProductAttention.hidden_size |
1 | 0 | 0 |
attr |
MultiHeadDotProductAttention.total_num_heads |
1 | 0 | 0 |
attr |
MultiHeadDotProductAttention.num_heads |
1 | 0 | 0 |
attr |
MultiHeadDotProductAttention.head_dim |
1 | 0 | 0 |
attr |
MultiHeadDotProductAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
MultiHeadDotProductAttention.num_kv_heads |
1 | 0 | 0 |
attr |
MultiHeadDotProductAttention.wq |
1 | 0 | 0 |
attr |
MultiHeadDotProductAttention.wk |
1 | 0 | 0 |
attr |
MultiHeadDotProductAttention.wv |
1 | 0 | 0 |
attr |
MultiHeadDotProductAttention.wo |
1 | 0 | 0 |
attr |
MultiHeadDotProductAttention.scale |
1 | 0 | 0 |
attr |
MultiHeadDotProductAttention.attn |
1 | 0 | 0 |
meth |
MolmoProcessorWrapper.init |
2 | 1 | 0 |
meth |
MolmoProcessorWrapper.call |
5 | 4 | 0 |
attr |
MolmoProcessorWrapper.processor |
1 | 0 | 0 |
attr |
MolmoVisionBackbone.vit_layers |
1 | 0 | 0 |
attr |
MolmoVisionBackbone.image_num_patch |
1 | 0 | 0 |
attr |
MolmoVisionBackbone.llm_patches_per_crop |
1 | 0 | 0 |
attr |
MolmoVisionBackbone.image_vit |
1 | 0 | 0 |
attr |
MolmoVisionBackbone.num_prefix_tokens |
1 | 0 | 0 |
attr |
MolmoVisionBackbone.image_pooling_2d |
1 | 0 | 0 |
attr |
MolmoVisionBackbone.image_projector |
1 | 0 | 0 |
attr |
MolmoVisionBackbone.pad_embed |
1 | 0 | 0 |
meth |
VisionTransformer.init |
4 | 3 | 0 |
attr |
VisionTransformer.patch_num |
1 | 0 | 0 |
attr |
VisionTransformer.class_embedding |
1 | 0 | 0 |
attr |
VisionTransformer.positional_embedding |
1 | 0 | 0 |
attr |
VisionTransformer.patch_embedding |
1 | 0 | 0 |
attr |
VisionTransformer.pre_ln |
1 | 0 | 0 |
attr |
VisionTransformer.transformer |
1 | 0 | 0 |
attr |
MolmoDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
MolmoDecoderLayer.mlp |
1 | 0 | 0 |
attr |
MolmoDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
MolmoDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
MolmoAttention.hidden_size |
1 | 0 | 0 |
attr |
MolmoAttention.tp_size |
1 | 0 | 0 |
attr |
MolmoAttention.total_num_heads |
1 | 0 | 0 |
attr |
MolmoAttention.num_heads |
1 | 0 | 0 |
attr |
MolmoAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
MolmoAttention.num_kv_heads |
1 | 0 | 0 |
attr |
MolmoAttention.head_dim |
1 | 0 | 0 |
attr |
MolmoAttention.q_size |
1 | 0 | 0 |
attr |
MolmoAttention.kv_size |
1 | 0 | 0 |
attr |
MolmoAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
MolmoAttention.qkv_proj |
1 | 0 | 0 |
attr |
MolmoAttention.rotary_emb |
1 | 0 | 0 |
attr |
MolmoAttention.scaling |
1 | 0 | 0 |
attr |
MolmoAttention.attn |
1 | 0 | 0 |
attr |
MolmoAttention.o_proj |
1 | 0 | 0 |
meth |
BlockCollection.init |
4 | 3 | 0 |
attr |
BlockCollection.resblocks |
1 | 0 | 0 |
meth |
ResidualAttentionBlock.init |
4 | 3 | 0 |
attr |
ResidualAttentionBlock.attention |
1 | 0 | 0 |
attr |
ResidualAttentionBlock.feed_forward |
1 | 0 | 0 |
attr |
ResidualAttentionBlock.attention_norm |
1 | 0 | 0 |
attr |
ResidualAttentionBlock.ffn_norm |
1 | 0 | 0 |
meth |
ViTMLP.init |
4 | 3 | 0 |
attr |
ViTMLP.w1 |
1 | 0 | 0 |
attr |
ViTMLP.act |
1 | 0 | 0 |
attr |
ViTMLP.w2 |
1 | 0 | 0 |
meth |
VisionBackboneConfig.post_init |
1 | 0 | 0 |
prop |
VisionBackboneConfig.image_num_patch |
1 | 0 | 0 |
meth |
MolmoForCausalLM.init |
3 | 2 | 0 |
meth |
MolmoForCausalLM.load_weights |
2 | 1 | 0 |
attr |
MolmoForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
MolmoForCausalLM.config |
1 | 0 | 0 |
attr |
MolmoForCausalLM.multimodal_config |
1 | 0 | 0 |
attr |
MolmoForCausalLM.img_patch_id |
1 | 0 | 0 |
attr |
MolmoForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
MolmoForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
MolmoForCausalLM.vision_backbone |
1 | 0 | 0 |
attr |
MolmoForCausalLM.model |
1 | 0 | 0 |
attr |
MolmoForCausalLM.lm_head |
1 | 0 | 0 |
attr |
LanguageModelMLP.hidden_size |
1 | 0 | 0 |
attr |
LanguageModelMLP.intermediate_size |
1 | 0 | 0 |
attr |
LanguageModelMLP.gate_up_proj |
1 | 0 | 0 |
attr |
LanguageModelMLP.act_fn |
1 | 0 | 0 |
attr |
LanguageModelMLP.down_proj |
1 | 0 | 0 |
func |
select_tiling |
5 | 4 | 0 |
attr |
ImageProjectorMLP.hidden_size |
1 | 0 | 0 |
attr |
ImageProjectorMLP.intermediate_size |
1 | 0 | 0 |
attr |
ImageProjectorMLP.merged_linear |
1 | 0 | 0 |
attr |
ImageProjectorMLP.act_fn |
1 | 0 | 0 |
attr |
ImageProjectorMLP.down_proj |
1 | 0 | 0 |
vllm.model_executor.models.molmo2 (101 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Molmo2TextModel.init |
3 | 2 | 0 |
attr |
Molmo2TextModel.config |
1 | 0 | 0 |
attr |
Molmo2TextModel.embedding_size |
1 | 0 | 0 |
attr |
Molmo2TextModel.embed_tokens |
1 | 0 | 0 |
attr |
Molmo2TextModel.norm |
1 | 0 | 0 |
attr |
Molmo2TextModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
Molmo2ForConditionalGeneration.init |
3 | 2 | 0 |
meth |
Molmo2ForConditionalGeneration.load_weights |
2 | 1 | 0 |
prop |
Molmo2ForConditionalGeneration.dtype |
1 | 0 | 0 |
attr |
Molmo2ForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Molmo2ForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Molmo2ForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Molmo2ForConditionalGeneration.img_patch_id |
1 | 0 | 0 |
attr |
Molmo2ForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
Molmo2ForConditionalGeneration.logits_processor |
1 | 0 | 0 |
attr |
Molmo2ForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Molmo2ForConditionalGeneration.vision_backbone |
1 | 0 | 0 |
attr |
Molmo2ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Molmo2VisionBlock.attention |
1 | 0 | 0 |
attr |
Molmo2VisionBlock.feed_forward |
1 | 0 | 0 |
attr |
Molmo2VisionBlock.attention_norm |
1 | 0 | 0 |
attr |
Molmo2VisionBlock.ffn_norm |
1 | 0 | 0 |
attr |
Molmo2VisionTransformer.patch_num |
1 | 0 | 0 |
attr |
Molmo2VisionTransformer.positional_embedding |
1 | 0 | 0 |
attr |
Molmo2VisionTransformer.patch_embedding |
1 | 0 | 0 |
attr |
Molmo2VisionTransformer.transformer |
1 | 0 | 0 |
attr |
Molmo2DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Molmo2DecoderLayer.mlp |
1 | 0 | 0 |
attr |
Molmo2DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Molmo2DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Molmo2VisionBackbone.vit_config |
1 | 0 | 0 |
attr |
Molmo2VisionBackbone.adapter_config |
1 | 0 | 0 |
attr |
Molmo2VisionBackbone.vit_layers |
1 | 0 | 0 |
attr |
Molmo2VisionBackbone.image_vit |
1 | 0 | 0 |
attr |
Molmo2VisionBackbone.image_pooling_2d |
1 | 0 | 0 |
attr |
Molmo2VisionBackbone.image_projector |
1 | 0 | 0 |
meth |
Molmo2ProcessorWrapper.init |
3 | 2 | 0 |
attr |
Molmo2ProcessorWrapper.processor |
1 | 0 | 0 |
attr |
Molmo2ProcessorWrapper.hf_config |
1 | 0 | 0 |
attr |
ImagePoolingAttention.input_dim |
1 | 0 | 0 |
attr |
ImagePoolingAttention.hidden_size |
1 | 0 | 0 |
attr |
ImagePoolingAttention.total_num_heads |
1 | 0 | 0 |
attr |
ImagePoolingAttention.num_heads |
1 | 0 | 0 |
attr |
ImagePoolingAttention.head_dim |
1 | 0 | 0 |
attr |
ImagePoolingAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
ImagePoolingAttention.num_kv_heads |
1 | 0 | 0 |
attr |
ImagePoolingAttention.kv_size |
1 | 0 | 0 |
attr |
ImagePoolingAttention.q_proj |
1 | 0 | 0 |
attr |
ImagePoolingAttention.merged_kv |
1 | 0 | 0 |
attr |
ImagePoolingAttention.o_proj |
1 | 0 | 0 |
attr |
ImagePoolingAttention.scale |
1 | 0 | 0 |
attr |
ImagePoolingAttention.use_pytorch_sdpa |
1 | 0 | 0 |
attr |
ImagePoolingAttention.attn |
1 | 0 | 0 |
meth |
Molmo2ProcessingInfo.get_data_parser |
1 | 0 | 0 |
attr |
ViTMLP.w1 |
1 | 0 | 0 |
attr |
ViTMLP.act |
1 | 0 | 0 |
attr |
ViTMLP.w2 |
1 | 0 | 0 |
attr |
Molmo2Attention.hidden_size |
1 | 0 | 0 |
attr |
Molmo2Attention.tp_size |
1 | 0 | 0 |
attr |
Molmo2Attention.total_num_heads |
1 | 0 | 0 |
attr |
Molmo2Attention.num_heads |
1 | 0 | 0 |
attr |
Molmo2Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Molmo2Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Molmo2Attention.head_dim |
1 | 0 | 0 |
attr |
Molmo2Attention.q_size |
1 | 0 | 0 |
attr |
Molmo2Attention.kv_size |
1 | 0 | 0 |
attr |
Molmo2Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
Molmo2Attention.rope_theta |
1 | 0 | 0 |
attr |
Molmo2Attention.qkv_proj |
1 | 0 | 0 |
attr |
Molmo2Attention.rotary_emb |
1 | 0 | 0 |
attr |
Molmo2Attention.scaling |
1 | 0 | 0 |
attr |
Molmo2Attention.attn |
1 | 0 | 0 |
attr |
Molmo2Attention.o_proj |
1 | 0 | 0 |
attr |
LanguageModelMLP.up_gate_proj |
1 | 0 | 0 |
attr |
LanguageModelMLP.act_fn |
1 | 0 | 0 |
attr |
LanguageModelMLP.down_proj |
1 | 0 | 0 |
func |
get_frame_times_and_chosen_fps |
5 | 0 | 0 |
meth |
VitConfig.post_init |
1 | 0 | 0 |
prop |
VitConfig.image_num_patch |
1 | 0 | 0 |
attr |
ViTMultiHeadDotProductAttention.hidden_size |
1 | 0 | 0 |
attr |
ViTMultiHeadDotProductAttention.total_num_heads |
1 | 0 | 0 |
attr |
ViTMultiHeadDotProductAttention.num_heads |
1 | 0 | 0 |
attr |
ViTMultiHeadDotProductAttention.head_dim |
1 | 0 | 0 |
attr |
ViTMultiHeadDotProductAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
ViTMultiHeadDotProductAttention.num_kv_heads |
1 | 0 | 0 |
attr |
ViTMultiHeadDotProductAttention.q_size |
1 | 0 | 0 |
attr |
ViTMultiHeadDotProductAttention.kv_size |
1 | 0 | 0 |
attr |
ViTMultiHeadDotProductAttention.merged_qkv |
1 | 0 | 0 |
attr |
ViTMultiHeadDotProductAttention.wo |
1 | 0 | 0 |
attr |
ViTMultiHeadDotProductAttention.scale |
1 | 0 | 0 |
attr |
ViTMultiHeadDotProductAttention.attn |
1 | 0 | 0 |
func |
select_tiling |
5 | 4 | 0 |
attr |
ImageProjectorMLP.merged_linear |
1 | 0 | 0 |
attr |
ImageProjectorMLP.act_fn |
1 | 0 | 0 |
attr |
ImageProjectorMLP.down_proj |
1 | 0 | 0 |
attr |
Molmo2VisionBlockCollection.resblocks |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.moonvit (51 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoonVitEncoderLayer.init |
7 | 5 | 0 |
meth |
MoonVitEncoderLayer.attention_qkvpacked |
4 | 3 | 0 |
attr |
MoonVitEncoderLayer.use_data_parallel |
1 | 0 | 0 |
attr |
MoonVitEncoderLayer.num_heads |
1 | 0 | 0 |
attr |
MoonVitEncoderLayer.hidden_dim |
1 | 0 | 0 |
attr |
MoonVitEncoderLayer.hidden_size_per_attention_head |
1 | 0 | 0 |
attr |
MoonVitEncoderLayer.tp_size |
1 | 0 | 0 |
attr |
MoonVitEncoderLayer.num_attention_heads_per_partition |
1 | 0 | 0 |
attr |
MoonVitEncoderLayer.norm0 |
1 | 0 | 0 |
attr |
MoonVitEncoderLayer.norm1 |
1 | 0 | 0 |
attr |
MoonVitEncoderLayer.mlp |
1 | 0 | 0 |
attr |
MoonVitEncoderLayer.wqkv |
1 | 0 | 0 |
attr |
MoonVitEncoderLayer.wo |
1 | 0 | 0 |
attr |
MoonVitEncoderLayer.attn |
1 | 0 | 0 |
meth |
Learnable2DInterpPosEmb.reset_parameters |
1 | 0 | 0 |
attr |
Learnable2DInterpPosEmb.height |
1 | 0 | 0 |
attr |
Learnable2DInterpPosEmb.width |
1 | 0 | 0 |
attr |
Learnable2DInterpPosEmb.interpolation_mode |
1 | 0 | 0 |
attr |
Learnable2DInterpPosEmb.weight |
1 | 0 | 0 |
meth |
Rope2DPosEmb.init |
6 | 3 | 0 |
meth |
Rope2DPosEmb.extra_repr |
1 | 0 | 0 |
attr |
Rope2DPosEmb.dim |
1 | 0 | 0 |
attr |
Rope2DPosEmb.max_height |
1 | 0 | 0 |
attr |
Rope2DPosEmb.max_width |
1 | 0 | 0 |
attr |
Rope2DPosEmb.theta_base |
1 | 0 | 0 |
attr |
Rope2DPosEmb.device |
1 | 0 | 0 |
meth |
MoonVisionPatchEmbed.init |
6 | 5 | 0 |
attr |
MoonVisionPatchEmbed.patch_size |
1 | 0 | 0 |
attr |
MoonVisionPatchEmbed.proj |
1 | 0 | 0 |
attr |
MoonVisionPatchEmbed.pos_emb |
1 | 0 | 0 |
meth |
MLP2.init |
5 | 3 | 0 |
attr |
MLP2.use_data_parallel |
1 | 0 | 0 |
attr |
MLP2.fc0 |
1 | 0 | 0 |
attr |
MLP2.fc1 |
1 | 0 | 0 |
attr |
MLP2.activation |
1 | 0 | 0 |
attr |
MoonVitEncoder.rope_2d |
1 | 0 | 0 |
attr |
MoonVitEncoder.blocks |
1 | 0 | 0 |
attr |
MoonVitEncoder.final_layernorm |
1 | 0 | 0 |
meth |
MoonVitPretrainedModel.init |
5 | 2 | 0 |
attr |
MoonVitPretrainedModel.merge_kernel_size |
1 | 0 | 0 |
attr |
MoonVitPretrainedModel.hidden_size |
1 | 0 | 0 |
attr |
MoonVitPretrainedModel.patch_size |
1 | 0 | 0 |
attr |
MoonVitPretrainedModel.vit_processing_type |
1 | 0 | 0 |
attr |
MoonVitPretrainedModel.patch_embed |
1 | 0 | 0 |
attr |
MoonVitPretrainedModel.encoder |
1 | 0 | 0 |
vllm.model_executor.models.mpt (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MPTForCausalLM.init |
3 | 2 | 0 |
attr |
MPTForCausalLM.config |
1 | 0 | 0 |
attr |
MPTForCausalLM.quant_config |
1 | 0 | 0 |
attr |
MPTForCausalLM.transformer |
1 | 0 | 0 |
attr |
MPTForCausalLM.lm_head |
1 | 0 | 0 |
attr |
MPTForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
MPTForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
MPTMLP.init |
4 | 3 | 0 |
attr |
MPTMLP.up_proj |
1 | 0 | 0 |
attr |
MPTMLP.act |
1 | 0 | 0 |
attr |
MPTMLP.down_proj |
1 | 0 | 0 |
meth |
MPTBlock.init |
5 | 4 | 0 |
attr |
MPTBlock.norm_1 |
1 | 0 | 0 |
attr |
MPTBlock.attn |
1 | 0 | 0 |
attr |
MPTBlock.norm_2 |
1 | 0 | 0 |
attr |
MPTBlock.ffn |
1 | 0 | 0 |
meth |
MPTModel.init |
3 | 2 | 0 |
attr |
MPTModel.wte |
1 | 0 | 0 |
attr |
MPTModel.norm_f |
1 | 0 | 0 |
attr |
MPTModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
MPTAttention.init |
5 | 4 | 0 |
attr |
MPTAttention.d_model |
1 | 0 | 0 |
attr |
MPTAttention.total_num_heads |
1 | 0 | 0 |
attr |
MPTAttention.head_dim |
1 | 0 | 0 |
attr |
MPTAttention.clip_qkv |
1 | 0 | 0 |
attr |
MPTAttention.qk_ln |
1 | 0 | 0 |
attr |
MPTAttention.alibi_bias_max |
1 | 0 | 0 |
attr |
MPTAttention.Wqkv |
1 | 0 | 0 |
attr |
MPTAttention.out_proj |
1 | 0 | 0 |
attr |
MPTAttention.num_heads |
1 | 0 | 0 |
attr |
MPTAttention.num_kv_heads |
1 | 0 | 0 |
attr |
MPTAttention.q_size |
1 | 0 | 0 |
attr |
MPTAttention.kv_size |
1 | 0 | 0 |
attr |
MPTAttention.attn |
1 | 0 | 0 |
attr |
MPTAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
MPTAttention.q_ln |
1 | 0 | 0 |
attr |
MPTAttention.k_ln |
1 | 0 | 0 |
vllm.model_executor.models.musicflamingo (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MusicFlamingoProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
MusicFlamingoProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
MusicFlamingoProcessingInfo.get_feature_extractor |
2 | 1 | 0 |
vllm.model_executor.models.nano_nemotron_vl (61 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseNanoNemotronVLProcessor.init |
7 | 5 | 0 |
meth |
BaseNanoNemotronVLProcessor._make_batch_input |
2 | 1 | 0 |
attr |
BaseNanoNemotronVLProcessor.config |
1 | 0 | 0 |
attr |
BaseNanoNemotronVLProcessor.tokenizer |
1 | 0 | 0 |
attr |
BaseNanoNemotronVLProcessor.max_num_tiles |
1 | 0 | 0 |
attr |
BaseNanoNemotronVLProcessor.num_image_token |
1 | 0 | 0 |
attr |
BaseNanoNemotronVLProcessor.image_size |
1 | 0 | 0 |
attr |
BaseNanoNemotronVLProcessor.norm_mean |
1 | 0 | 0 |
attr |
BaseNanoNemotronVLProcessor.norm_std |
1 | 0 | 0 |
meth |
NanoNemotronVLProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
NanoNemotronVLProcessingInfo.get_supported_mm_limits |
1 | 0 | 0 |
prop |
NanoNemotronVLProcessingInfo.supports_video |
1 | 0 | 0 |
meth |
NemotronH_Nano_VL_V2.init |
3 | 2 | 0 |
meth |
NemotronH_Nano_VL_V2.pixel_shuffle |
3 | 0 | 0 |
meth |
NemotronH_Nano_VL_V2.extract_feature_dynamic |
3 | 2 | 0 |
meth |
NemotronH_Nano_VL_V2.extract_feature |
2 | 1 | 0 |
meth |
NemotronH_Nano_VL_V2.load_weights |
2 | 1 | 0 |
meth |
NemotronH_Nano_VL_V2.get_vit_model_from_radio_config |
2 | 0 | 0 |
meth |
NemotronH_Nano_VL_V2.copy_inputs_before_cuda_graphs |
3 | 0 | 0 |
meth |
NemotronH_Nano_VL_V2.get_seqlen_agnostic_capture_inputs |
2 | 1 | 0 |
meth |
NemotronH_Nano_VL_V2.get_mamba_state_shape_from_config |
2 | 1 | 0 |
meth |
NemotronH_Nano_VL_V2.get_mamba_state_dtype_from_config |
2 | 1 | 0 |
meth |
NemotronH_Nano_VL_V2.get_mamba_state_copy_func |
1 | 0 | 0 |
attr |
NemotronH_Nano_VL_V2.patch_size |
1 | 0 | 0 |
attr |
NemotronH_Nano_VL_V2.template |
1 | 0 | 0 |
attr |
NemotronH_Nano_VL_V2.num_image_token |
1 | 0 | 0 |
attr |
NemotronH_Nano_VL_V2.downsample_ratio |
1 | 0 | 0 |
attr |
NemotronH_Nano_VL_V2.ps_version |
1 | 0 | 0 |
attr |
NemotronH_Nano_VL_V2.image_tag_type |
1 | 0 | 0 |
attr |
NemotronH_Nano_VL_V2.video_pruning_rate |
1 | 0 | 0 |
attr |
NemotronH_Nano_VL_V2.llm_dtype |
1 | 0 | 0 |
attr |
NemotronH_Nano_VL_V2.config |
1 | 0 | 0 |
attr |
NemotronH_Nano_VL_V2.model_config |
1 | 0 | 0 |
attr |
NemotronH_Nano_VL_V2.dynamic_resolution |
1 | 0 | 0 |
attr |
NemotronH_Nano_VL_V2.language_model |
1 | 0 | 0 |
attr |
NemotronH_Nano_VL_V2.vision_model |
1 | 0 | 0 |
attr |
NemotronH_Nano_VL_V2.mlp1 |
1 | 0 | 0 |
meth |
NanoNemotronVLProcessor._preprocess_video |
4 | 3 | 0 |
meth |
NanoNemotronVLProcessor._preprocess_audio |
3 | 2 | 0 |
attr |
NanoNemotronVLProcessor.video_token |
1 | 0 | 0 |
attr |
NanoNemotronVLProcessor.video_pruning_rate |
1 | 0 | 0 |
func |
calculate_timestamps |
3 | 2 | 0 |
attr |
DynamicResolutionImageTiler.norm_mean |
1 | 0 | 0 |
attr |
DynamicResolutionImageTiler.norm_std |
1 | 0 | 0 |
func |
dynamic_preprocess |
6 | 0 | 0 |
func |
input_conditioner |
4 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.nemotron (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NemotronMLP.forward |
2 | 0 | 0 |
attr |
NemotronMLP.up_proj |
1 | 0 | 0 |
attr |
NemotronMLP.down_proj |
1 | 0 | 0 |
attr |
NemotronMLP.act_fn |
1 | 0 | 0 |
meth |
NemotronForCausalLM.init |
3 | 2 | 0 |
attr |
NemotronForCausalLM.config |
1 | 0 | 0 |
attr |
NemotronForCausalLM.quant_config |
1 | 0 | 0 |
attr |
NemotronForCausalLM.model |
1 | 0 | 0 |
attr |
NemotronForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
NemotronForCausalLM.lm_head |
1 | 0 | 0 |
attr |
NemotronForCausalLM.logits_processor |
1 | 0 | 0 |
meth |
NemotronLayerNorm1P.init |
7 | 4 | 0 |
attr |
NemotronAttention.hidden_size |
1 | 0 | 0 |
attr |
NemotronAttention.total_num_heads |
1 | 0 | 0 |
attr |
NemotronAttention.num_heads |
1 | 0 | 0 |
attr |
NemotronAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
NemotronAttention.num_kv_heads |
1 | 0 | 0 |
attr |
NemotronAttention.head_dim |
1 | 0 | 0 |
attr |
NemotronAttention.q_size |
1 | 0 | 0 |
attr |
NemotronAttention.kv_size |
1 | 0 | 0 |
attr |
NemotronAttention.scaling |
1 | 0 | 0 |
attr |
NemotronAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
NemotronAttention.qkv_proj |
1 | 0 | 0 |
attr |
NemotronAttention.o_proj |
1 | 0 | 0 |
attr |
NemotronAttention.rotary_emb |
1 | 0 | 0 |
attr |
NemotronAttention.attn |
1 | 0 | 0 |
meth |
NemotronModel.init |
3 | 2 | 0 |
attr |
NemotronModel.config |
1 | 0 | 0 |
attr |
NemotronModel.quant_config |
1 | 0 | 0 |
attr |
NemotronModel.vocab_size |
1 | 0 | 0 |
attr |
NemotronModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
NemotronModel.embed_tokens |
1 | 0 | 0 |
attr |
NemotronModel.norm |
1 | 0 | 0 |
attr |
NemotronDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
NemotronDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
NemotronDecoderLayer.mlp |
1 | 0 | 0 |
attr |
NemotronDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
NemotronDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.nemotron_h (85 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NemotronHAttention.forward |
3 | 2 | 0 |
attr |
NemotronHAttention.hidden_size |
1 | 0 | 0 |
attr |
NemotronHAttention.total_num_heads |
1 | 0 | 0 |
attr |
NemotronHAttention.num_heads |
1 | 0 | 0 |
attr |
NemotronHAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
NemotronHAttention.num_kv_heads |
1 | 0 | 0 |
attr |
NemotronHAttention.q_size |
1 | 0 | 0 |
attr |
NemotronHAttention.kv_size |
1 | 0 | 0 |
attr |
NemotronHAttention.scaling |
1 | 0 | 0 |
attr |
NemotronHAttention.qkv_proj |
1 | 0 | 0 |
attr |
NemotronHAttention.o_proj |
1 | 0 | 0 |
attr |
NemotronHAttention.attn |
1 | 0 | 0 |
attr |
NemotronHAttention.head_dim |
1 | 0 | 0 |
meth |
NemotronHMLPDecoderLayer.forward |
4 | 2 | 0 |
attr |
NemotronHMLPDecoderLayer.config |
1 | 0 | 0 |
attr |
NemotronHMLPDecoderLayer.mixer |
1 | 0 | 0 |
attr |
NemotronHMLPDecoderLayer.norm |
1 | 0 | 0 |
meth |
NemotronHAttentionDecoderLayer.forward |
5 | 3 | 0 |
attr |
NemotronHAttentionDecoderLayer.mixer |
1 | 0 | 0 |
attr |
NemotronHAttentionDecoderLayer.norm |
1 | 0 | 0 |
meth |
NemotronHMLP.forward |
2 | 1 | 0 |
attr |
NemotronHMLP.up_proj |
1 | 0 | 0 |
attr |
NemotronHMLP.down_proj |
1 | 0 | 0 |
attr |
NemotronHMLP.act_fn |
1 | 0 | 0 |
meth |
NemotronHMoE.init |
5 | 4 | 0 |
attr |
NemotronHMoE.tp_size |
1 | 0 | 0 |
attr |
NemotronHMoE.routed_scaling_factor |
1 | 0 | 0 |
attr |
NemotronHMoE.ep_group |
1 | 0 | 0 |
attr |
NemotronHMoE.ep_rank |
1 | 0 | 0 |
attr |
NemotronHMoE.ep_size |
1 | 0 | 0 |
attr |
NemotronHMoE.is_sequence_parallel |
1 | 0 | 0 |
attr |
NemotronHMoE.gate |
1 | 0 | 0 |
attr |
NemotronHMoE.enable_eplb |
1 | 0 | 0 |
attr |
NemotronHMoE.n_redundant_experts |
1 | 0 | 0 |
attr |
NemotronHMoE.n_logical_experts |
1 | 0 | 0 |
attr |
NemotronHMoE.n_physical_experts |
1 | 0 | 0 |
attr |
NemotronHMoE.n_local_physical_experts |
1 | 0 | 0 |
attr |
NemotronHMoE.physical_expert_start |
1 | 0 | 0 |
attr |
NemotronHMoE.physical_expert_end |
1 | 0 | 0 |
attr |
NemotronHMoE.experts |
1 | 0 | 0 |
attr |
NemotronHMoE.shared_experts |
1 | 0 | 0 |
attr |
NemotronHMoE.fc1_latent_proj |
1 | 0 | 0 |
attr |
NemotronHMoE.fc2_latent_proj |
1 | 0 | 0 |
meth |
NemotronHMambaDecoderLayer.forward |
4 | 2 | 0 |
attr |
NemotronHMambaDecoderLayer.config |
1 | 0 | 0 |
attr |
NemotronHMambaDecoderLayer.mixer |
1 | 0 | 0 |
attr |
NemotronHMambaDecoderLayer.norm |
1 | 0 | 0 |
meth |
NemotronHModel.init |
3 | 2 | 0 |
attr |
NemotronHModel.config |
1 | 0 | 0 |
attr |
NemotronHModel.vocab_size |
1 | 0 | 0 |
attr |
NemotronHModel.embed_tokens |
1 | 0 | 0 |
attr |
NemotronHModel.has_moe |
1 | 0 | 0 |
attr |
NemotronHModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
NemotronHModel.norm_f |
1 | 0 | 0 |
meth |
NemotronHForCausalLM.init |
3 | 2 | 0 |
meth |
NemotronHForCausalLM.forward |
6 | 4 | 0 |
attr |
NemotronHForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.model_config |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.quant_config |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.config |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.scheduler_config |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.model |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.lm_head |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.expert_weights |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.num_expert_groups |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.moe_layers |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.num_moe_layers |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.num_logical_experts |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.num_physical_experts |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.num_local_physical_experts |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.num_routed_experts |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.num_shared_experts |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.num_redundant_experts |
1 | 0 | 0 |
meth |
NemotronHMoEDecoderLayer.forward |
4 | 2 | 0 |
attr |
NemotronHMoEDecoderLayer.config |
1 | 0 | 0 |
attr |
NemotronHMoEDecoderLayer.mixer |
1 | 0 | 0 |
attr |
NemotronHMoEDecoderLayer.norm |
1 | 0 | 0 |
vllm.model_executor.models.nemotron_h_mtp (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
NemotronHMTPMoEDecoderLayer.has_start_projections |
1 | 0 | 0 |
attr |
NemotronHMTPMoEDecoderLayer.has_end_norm |
1 | 0 | 0 |
attr |
NemotronHMTPMoEDecoderLayer.enorm |
1 | 0 | 0 |
attr |
NemotronHMTPMoEDecoderLayer.hnorm |
1 | 0 | 0 |
attr |
NemotronHMTPMoEDecoderLayer.eh_proj |
1 | 0 | 0 |
attr |
NemotronHMTPMoEDecoderLayer.final_layernorm |
1 | 0 | 0 |
meth |
NemotronHMTP.init |
3 | 2 | 0 |
attr |
NemotronHMTP.vllm_config |
1 | 0 | 0 |
attr |
NemotronHMTP.config |
1 | 0 | 0 |
attr |
NemotronHMTP.quant_config |
1 | 0 | 0 |
attr |
NemotronHMTP.mtp_start_layer_idx |
1 | 0 | 0 |
attr |
NemotronHMTP.num_redundant_experts |
1 | 0 | 0 |
attr |
NemotronHMTP.model |
1 | 0 | 0 |
attr |
NemotronHMTP.lm_head |
1 | 0 | 0 |
attr |
NemotronHMTP.logits_processor |
1 | 0 | 0 |
attr |
NemotronHMTP.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
NemotronHMultiTokenPredictor.init |
3 | 2 | 0 |
attr |
NemotronHMultiTokenPredictor.config |
1 | 0 | 0 |
attr |
NemotronHMultiTokenPredictor.vocab_size |
1 | 0 | 0 |
attr |
NemotronHMultiTokenPredictor.org_vocab_size |
1 | 0 | 0 |
attr |
NemotronHMultiTokenPredictor.mtp_start_layer_idx |
1 | 0 | 0 |
attr |
NemotronHMultiTokenPredictor.num_mtp_layers |
1 | 0 | 0 |
attr |
NemotronHMultiTokenPredictor.pattern_str |
1 | 0 | 0 |
attr |
NemotronHMultiTokenPredictor.pattern_len |
1 | 0 | 0 |
attr |
NemotronHMultiTokenPredictor.embed_tokens |
1 | 0 | 0 |
attr |
NemotronHMultiTokenPredictor.layers |
1 | 0 | 0 |
attr |
NemotronHMTPAttentionDecoderLayer.has_start_projections |
1 | 0 | 0 |
attr |
NemotronHMTPAttentionDecoderLayer.has_end_norm |
1 | 0 | 0 |
attr |
NemotronHMTPAttentionDecoderLayer.enorm |
1 | 0 | 0 |
attr |
NemotronHMTPAttentionDecoderLayer.hnorm |
1 | 0 | 0 |
attr |
NemotronHMTPAttentionDecoderLayer.eh_proj |
1 | 0 | 0 |
attr |
NemotronHMTPAttentionDecoderLayer.final_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.nemotron_nas (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeciLMAttention._init_rotary_emb |
3 | 2 | 0 |
meth |
DeciLMForCausalLM.init |
3 | 2 | 0 |
meth |
DeciLMForCausalLM._init_model |
3 | 2 | 0 |
attr |
DeciLMForCausalLM.config |
1 | 0 | 0 |
attr |
DeciLMForCausalLM.model |
1 | 0 | 0 |
attr |
DeciLMForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
DeciLMForCausalLM.lm_head |
1 | 0 | 0 |
attr |
DeciLMForCausalLM.logits_processor |
1 | 0 | 0 |
meth |
DeciModel.init |
4 | 3 | 0 |
attr |
DeciModel.config |
1 | 0 | 0 |
attr |
DeciModel.quant_config |
1 | 0 | 0 |
attr |
DeciModel.vocab_size |
1 | 0 | 0 |
attr |
DeciModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
DeciModel.embed_tokens |
1 | 0 | 0 |
attr |
DeciModel.norm |
1 | 0 | 0 |
attr |
DeciLMDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
DeciLMDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
DeciLMDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
DeciLMDecoderLayer.mlp |
1 | 0 | 0 |
attr |
DeciLMDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.nemotron_parse (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MBartDecoderNoPos.init |
7 | 6 | 0 |
meth |
MBartDecoderNoPos.forward |
5 | 4 | 0 |
attr |
MBartDecoderNoPos.cache_config |
1 | 0 | 0 |
attr |
MBartDecoderNoPos.quant_config |
1 | 0 | 0 |
attr |
MBartDecoderNoPos.lora_config |
1 | 0 | 0 |
attr |
MBartDecoderNoPos.embed_tokens |
1 | 0 | 0 |
attr |
MBartDecoderNoPos.layers |
1 | 0 | 0 |
attr |
MBartDecoderNoPos.layernorm_embedding |
1 | 0 | 0 |
attr |
MBartDecoderNoPos.layer_norm |
1 | 0 | 0 |
meth |
NemotronParseProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
NemotronParseProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
BartScaledWordEmbedding.init |
4 | 3 | 0 |
attr |
BartScaledWordEmbedding.embed_scale |
1 | 0 | 0 |
meth |
BartParallelLMHead.init |
4 | 3 | 0 |
attr |
BartParallelLMHead.embed_scale |
1 | 0 | 0 |
meth |
NemotronParseProcessor.init |
4 | 3 | 0 |
meth |
NemotronParseProcessor._make_batch_input |
2 | 0 | 0 |
meth |
NemotronParseProcessor.call |
5 | 4 | 0 |
attr |
NemotronParseProcessor.config |
1 | 0 | 0 |
attr |
NemotronParseProcessor.tokenizer |
1 | 0 | 0 |
attr |
NemotronParseProcessor.image_processor |
1 | 0 | 0 |
meth |
NemotronParseImageProcessor.init |
3 | 1 | 0 |
meth |
NemotronParseImageProcessor._create_transforms |
1 | 0 | 0 |
meth |
NemotronParseImageProcessor.preprocess |
3 | 2 | 0 |
meth |
NemotronParseImageProcessor.call |
3 | 2 | 0 |
attr |
NemotronParseImageProcessor.norm_mean |
1 | 0 | 0 |
attr |
NemotronParseImageProcessor.norm_std |
1 | 0 | 0 |
attr |
NemotronParseImageProcessor.final_size |
1 | 0 | 0 |
meth |
RadioWithNeck.init |
4 | 3 | 0 |
meth |
RadioWithNeck.forward |
3 | 2 | 0 |
meth |
RadioWithNeck.load_weights |
2 | 1 | 0 |
attr |
RadioWithNeck.config |
1 | 0 | 0 |
attr |
RadioWithNeck.model_encoder |
1 | 0 | 0 |
attr |
RadioWithNeck.conv1 |
1 | 0 | 0 |
attr |
RadioWithNeck.layer_norm1 |
1 | 0 | 0 |
attr |
RadioWithNeck.conv2 |
1 | 0 | 0 |
attr |
RadioWithNeck.layer_norm2 |
1 | 0 | 0 |
attr |
RadioWithNeck.sum_proj |
1 | 0 | 0 |
attr |
RadioWithNeck.layer_norm3 |
1 | 0 | 0 |
meth |
BartDecoderLayer.init |
5 | 4 | 0 |
attr |
BartDecoderLayer.embed_dim |
1 | 0 | 0 |
attr |
BartDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
BartDecoderLayer.activation_fn |
1 | 0 | 0 |
attr |
BartDecoderLayer.self_attn_layer_norm |
1 | 0 | 0 |
attr |
BartDecoderLayer.encoder_attn |
1 | 0 | 0 |
attr |
BartDecoderLayer.encoder_attn_layer_norm |
1 | 0 | 0 |
attr |
BartDecoderLayer.fc1 |
1 | 0 | 0 |
attr |
BartDecoderLayer.fc2 |
1 | 0 | 0 |
attr |
BartDecoderLayer.final_layer_norm |
1 | 0 | 0 |
meth |
NemotronParseForConditionalGeneration.init |
3 | 2 | 0 |
meth |
NemotronParseForConditionalGeneration.forward |
5 | 4 | 0 |
meth |
NemotronParseForConditionalGeneration.load_weights |
2 | 1 | 0 |
attr |
NemotronParseForConditionalGeneration.config |
1 | 0 | 0 |
attr |
NemotronParseForConditionalGeneration.vision_config |
1 | 0 | 0 |
attr |
NemotronParseForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
NemotronParseForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
NemotronParseForConditionalGeneration.logits_processor |
1 | 0 | 0 |
attr |
NemotronParseForConditionalGeneration.encoder |
1 | 0 | 0 |
attr |
NemotronParseForConditionalGeneration.decoder |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.nemotron_vl (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
build_siglip_transform |
2 | 1 | 0 |
attr |
LlamaNemotronVLForSequenceClassification.weight_mapper |
1 | 0 | 0 |
attr |
LlamaNemotronVLForSequenceClassification.score |
1 | 0 | 0 |
attr |
LlamaNemotronVLForSequenceClassification.pooler |
1 | 0 | 0 |
func |
build_transform |
2 | 1 | 0 |
attr |
NemotronVLProcessor.config |
1 | 0 | 0 |
attr |
NemotronVLProcessor.tokenizer |
1 | 0 | 0 |
attr |
NemotronVLProcessor.image_processor |
1 | 0 | 0 |
attr |
NemotronVLProcessor.num_image_token |
1 | 0 | 0 |
attr |
NemotronVLProcessor.image_size |
1 | 0 | 0 |
attr |
NemotronVLProcessor.min_dynamic_patch |
1 | 0 | 0 |
attr |
NemotronVLProcessor.max_dynamic_patch |
1 | 0 | 0 |
attr |
NemotronVLProcessor.dynamic_image_size |
1 | 0 | 0 |
attr |
NemotronVLProcessor.use_thumbnail |
1 | 0 | 0 |
meth |
NemotronVLProcessingInfo.get_image_processor |
2 | 1 | 0 |
meth |
LlamaNemotronVLChatModel._patch_quant_config |
3 | 2 | 0 |
meth |
LlamaNemotronVLChatModel._init_vision_model |
4 | 3 | 0 |
meth |
LlamaNemotronVLChatModel.pixel_shuffle |
3 | 0 | 0 |
attr |
LlamaNemotronVLChatModel.config |
1 | 0 | 0 |
attr |
LlamaNemotronVLChatModel.multimodal_config |
1 | 0 | 0 |
attr |
LlamaNemotronVLChatModel.patch_size |
1 | 0 | 0 |
attr |
LlamaNemotronVLChatModel.num_image_token |
1 | 0 | 0 |
attr |
LlamaNemotronVLChatModel.downsample_ratio |
1 | 0 | 0 |
attr |
LlamaNemotronVLChatModel.ps_version |
1 | 0 | 0 |
attr |
LlamaNemotronVLChatModel.img_context_token_id |
1 | 0 | 0 |
attr |
LlamaNemotronVLChatModel.visual_token_mask |
1 | 0 | 0 |
attr |
LlamaNemotronVLChatModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
LlamaNemotronVLChatModel.vision_model |
1 | 0 | 0 |
attr |
LlamaNemotronVLChatModel.mlp1 |
1 | 0 | 0 |
attr |
LlamaNemotronVLChatModel.language_model |
1 | 0 | 0 |
meth |
LlamaNemotronVLForEmbedding._init_vision_model |
4 | 3 | 0 |
attr |
LlamaNemotronVLForEmbedding.weight_mapper |
1 | 0 | 0 |
attr |
LlamaNemotronVLForEmbedding.img_context_token_id |
1 | 0 | 0 |
attr |
LlamaNemotronVLForEmbedding.pooler |
1 | 0 | 0 |
vllm.model_executor.models.nvlm_d (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NVLM_D_Model._init_vision_model |
5 | 4 | 0 |
vllm.model_executor.models.olmo (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OlmoAttention.init |
5 | 4 | 0 |
attr |
OlmoAttention.config |
1 | 0 | 0 |
attr |
OlmoAttention.hidden_size |
1 | 0 | 0 |
attr |
OlmoAttention.total_num_heads |
1 | 0 | 0 |
attr |
OlmoAttention.num_heads |
1 | 0 | 0 |
attr |
OlmoAttention.head_dim |
1 | 0 | 0 |
attr |
OlmoAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
OlmoAttention.clip_qkv |
1 | 0 | 0 |
attr |
OlmoAttention.qkv_proj |
1 | 0 | 0 |
attr |
OlmoAttention.rotary_emb |
1 | 0 | 0 |
attr |
OlmoAttention.scaling |
1 | 0 | 0 |
attr |
OlmoAttention.attn |
1 | 0 | 0 |
attr |
OlmoAttention.o_proj |
1 | 0 | 0 |
meth |
OlmoForCausalLM.init |
3 | 2 | 0 |
attr |
OlmoForCausalLM.config |
1 | 0 | 0 |
attr |
OlmoForCausalLM.model |
1 | 0 | 0 |
attr |
OlmoForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
OlmoForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
OlmoForCausalLM.lm_head |
1 | 0 | 0 |
meth |
OlmoMLP.init |
4 | 3 | 0 |
attr |
OlmoMLP.config |
1 | 0 | 0 |
attr |
OlmoMLP.hidden_size |
1 | 0 | 0 |
attr |
OlmoMLP.intermediate_size |
1 | 0 | 0 |
attr |
OlmoMLP.gate_up_proj |
1 | 0 | 0 |
attr |
OlmoMLP.act_fn |
1 | 0 | 0 |
attr |
OlmoMLP.down_proj |
1 | 0 | 0 |
meth |
OlmoModel.init |
3 | 2 | 0 |
attr |
OlmoModel.config |
1 | 0 | 0 |
attr |
OlmoModel.embed_tokens |
1 | 0 | 0 |
attr |
OlmoModel.norm |
1 | 0 | 0 |
attr |
OlmoModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
OlmoDecoderLayer.init |
5 | 4 | 0 |
attr |
OlmoDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
OlmoDecoderLayer.mlp |
1 | 0 | 0 |
attr |
OlmoDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
OlmoDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.olmo2 (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Olmo2ForCausalLM.init |
3 | 2 | 0 |
meth |
Olmo2ForCausalLM.load_weights |
2 | 1 | 0 |
attr |
Olmo2ForCausalLM.config |
1 | 0 | 0 |
attr |
Olmo2ForCausalLM.model |
1 | 0 | 0 |
attr |
Olmo2ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Olmo2ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Olmo2ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Olmo2Model.init |
3 | 2 | 0 |
attr |
Olmo2Model.config |
1 | 0 | 0 |
attr |
Olmo2Model.embed_tokens |
1 | 0 | 0 |
attr |
Olmo2Model.norm |
1 | 0 | 0 |
attr |
Olmo2Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
Olmo2Attention.init |
3 | 2 | 0 |
attr |
Olmo2Attention.config |
1 | 0 | 0 |
attr |
Olmo2Attention.tp_size |
1 | 0 | 0 |
attr |
Olmo2Attention.total_num_heads |
1 | 0 | 0 |
attr |
Olmo2Attention.num_heads |
1 | 0 | 0 |
attr |
Olmo2Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Olmo2Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Olmo2Attention.head_dim |
1 | 0 | 0 |
attr |
Olmo2Attention.q_size |
1 | 0 | 0 |
attr |
Olmo2Attention.kv_size |
1 | 0 | 0 |
attr |
Olmo2Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
Olmo2Attention.qkv_proj |
1 | 0 | 0 |
attr |
Olmo2Attention.tp_rank |
1 | 0 | 0 |
attr |
Olmo2Attention.k_norm |
1 | 0 | 0 |
attr |
Olmo2Attention.q_norm |
1 | 0 | 0 |
attr |
Olmo2Attention.scaling |
1 | 0 | 0 |
attr |
Olmo2Attention.attn |
1 | 0 | 0 |
attr |
Olmo2Attention.rotary_emb |
1 | 0 | 0 |
attr |
Olmo2Attention.o_proj |
1 | 0 | 0 |
meth |
Olmo2MLP.init |
3 | 2 | 0 |
attr |
Olmo2MLP.gate_up_proj |
1 | 0 | 0 |
attr |
Olmo2MLP.act_fn |
1 | 0 | 0 |
attr |
Olmo2MLP.down_proj |
1 | 0 | 0 |
meth |
Olmo2DecoderLayer.init |
3 | 2 | 0 |
attr |
Olmo2DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Olmo2DecoderLayer.mlp |
1 | 0 | 0 |
attr |
Olmo2DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Olmo2DecoderLayer.post_feedforward_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.olmoe (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
OlmoeDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
OlmoeDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
OlmoeDecoderLayer.mlp |
1 | 0 | 0 |
attr |
OlmoeDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
OlmoeDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
OlmoeMoE.init |
9 | 8 | 0 |
attr |
OlmoeMoE.hidden_size |
1 | 0 | 0 |
attr |
OlmoeMoE.gate |
1 | 0 | 0 |
attr |
OlmoeMoE.experts |
1 | 0 | 0 |
meth |
OlmoeModel.init |
4 | 3 | 0 |
attr |
OlmoeModel.vocab_size |
1 | 0 | 0 |
attr |
OlmoeModel.config |
1 | 0 | 0 |
attr |
OlmoeModel.embed_tokens |
1 | 0 | 0 |
attr |
OlmoeModel.norm |
1 | 0 | 0 |
attr |
OlmoeModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
OlmoeForCausalLM.init |
4 | 3 | 0 |
attr |
OlmoeForCausalLM.config |
1 | 0 | 0 |
attr |
OlmoeForCausalLM.quant_config |
1 | 0 | 0 |
attr |
OlmoeForCausalLM.model |
1 | 0 | 0 |
attr |
OlmoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
OlmoeForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
OlmoeForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
OlmoeAttention.hidden_size |
1 | 0 | 0 |
attr |
OlmoeAttention.total_num_heads |
1 | 0 | 0 |
attr |
OlmoeAttention.num_heads |
1 | 0 | 0 |
attr |
OlmoeAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
OlmoeAttention.num_kv_heads |
1 | 0 | 0 |
attr |
OlmoeAttention.head_dim |
1 | 0 | 0 |
attr |
OlmoeAttention.q_size |
1 | 0 | 0 |
attr |
OlmoeAttention.kv_size |
1 | 0 | 0 |
attr |
OlmoeAttention.scaling |
1 | 0 | 0 |
attr |
OlmoeAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
OlmoeAttention.qkv_proj |
1 | 0 | 0 |
attr |
OlmoeAttention.tp_size |
1 | 0 | 0 |
attr |
OlmoeAttention.tp_rank |
1 | 0 | 0 |
attr |
OlmoeAttention.q_norm |
1 | 0 | 0 |
attr |
OlmoeAttention.k_norm |
1 | 0 | 0 |
attr |
OlmoeAttention.o_proj |
1 | 0 | 0 |
attr |
OlmoeAttention.rotary_emb |
1 | 0 | 0 |
attr |
OlmoeAttention.attn |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.opencua (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OpenCUAProcessor.init |
4 | 2 | 0 |
meth |
OpenCUAProcessor.call |
5 | 0 | 0 |
attr |
OpenCUAProcessor.image_token |
1 | 0 | 0 |
meth |
OpenCUAForConditionalGeneration.init |
3 | 2 | 0 |
attr |
OpenCUAForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
OpenCUAForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
OpenCUAForConditionalGeneration.config |
1 | 0 | 0 |
attr |
OpenCUAForConditionalGeneration.vllm_config |
1 | 0 | 0 |
attr |
OpenCUAForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
OpenCUAForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
OpenCUAForConditionalGeneration.is_multimodal_pruning_enabled |
1 | 0 | 0 |
attr |
OpenCUAForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
OpenCUAForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
OpenCUAForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
OpenCUAProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
OpenCUAProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
OpenCUAProcessingInfo.get_hf_processor |
2 | 1 | 0 |
vllm.model_executor.models.openpangu (128 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
OpenPanguMLAAttention.hidden_size |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.num_heads |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.qk_nope_head_dim |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.qk_rope_head_dim |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.qk_head_dim |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.v_head_dim |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.q_lora_rank |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.kv_lora_rank |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.tp_size |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.num_local_heads |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.scaling |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.prefix |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.kv_a_layernorm |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.kv_b_proj |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.o_proj |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.rotary_emb |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.mla_attn |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.fused_qkv_a_proj |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.q_a_layernorm |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.q_b_proj |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.q_proj |
1 | 0 | 0 |
attr |
OpenPanguMLAAttention.kv_a_proj_with_mqa |
1 | 0 | 0 |
attr |
OpenPanguEmbeddedAttention.hidden_size |
1 | 0 | 0 |
attr |
OpenPanguEmbeddedAttention.total_num_heads |
1 | 0 | 0 |
attr |
OpenPanguEmbeddedAttention.num_heads |
1 | 0 | 0 |
attr |
OpenPanguEmbeddedAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
OpenPanguEmbeddedAttention.num_kv_heads |
1 | 0 | 0 |
attr |
OpenPanguEmbeddedAttention.head_dim |
1 | 0 | 0 |
attr |
OpenPanguEmbeddedAttention.q_size |
1 | 0 | 0 |
attr |
OpenPanguEmbeddedAttention.kv_size |
1 | 0 | 0 |
attr |
OpenPanguEmbeddedAttention.scaling |
1 | 0 | 0 |
attr |
OpenPanguEmbeddedAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
OpenPanguEmbeddedAttention.qkv_proj |
1 | 0 | 0 |
attr |
OpenPanguEmbeddedAttention.o_proj |
1 | 0 | 0 |
attr |
OpenPanguEmbeddedAttention.attn |
1 | 0 | 0 |
meth |
OpenPanguMLP.init |
9 | 8 | 0 |
attr |
OpenPanguMLP.gate_up_proj |
1 | 0 | 0 |
attr |
OpenPanguMLP.down_proj |
1 | 0 | 0 |
attr |
OpenPanguMLP.act_fn |
1 | 0 | 0 |
meth |
OpenPanguModelBase.init |
3 | 2 | 0 |
attr |
OpenPanguModelBase.config |
1 | 0 | 0 |
attr |
OpenPanguModelBase.quant_config |
1 | 0 | 0 |
attr |
OpenPanguModelBase.fuse_qkv_a_proj |
1 | 0 | 0 |
attr |
OpenPanguModelBase.model |
1 | 0 | 0 |
attr |
OpenPanguModelBase.logits_processor |
1 | 0 | 0 |
attr |
OpenPanguModelBase.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
OpenPanguModelBase.lm_head |
1 | 0 | 0 |
meth |
OpenPanguEmbeddedModel.init |
3 | 2 | 0 |
meth |
OpenPanguSinkAttention.weight_loader |
3 | 2 | 0 |
attr |
OpenPanguSinkAttention.hidden_size |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.tp_size |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.tp_rank |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.total_num_heads |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.num_heads |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.num_kv_heads |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.qk_nope_dim |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.qk_rope_dim |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.v_channels |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.head_dim |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.q_size |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.k_size |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.v_size |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.scaling |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.param_sink_number |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.param_sink_with_value |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.param_sink_scalar |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.param_sink_of_head_num |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.qkv_proj |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.o_proj |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.k_layernorm |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.attn |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.param_sink_key |
1 | 0 | 0 |
attr |
OpenPanguSinkAttention.param_sink_value |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.use_mla |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.use_sink_attention |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.routed_scaling_factor |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.num_hidden_layers |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.first_k_dense_replace |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.tp_group |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.sandwich_norm |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.mlp |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.pre_mlp_layernorm |
1 | 0 | 0 |
attr |
OpenPanguDecoderLayer.post_mlp_layernorm |
1 | 0 | 0 |
func |
check_ffn_act_fn |
2 | 1 | 0 |
meth |
OpenPanguMoE.init |
5 | 4 | 0 |
attr |
OpenPanguMoE.tp_size |
1 | 0 | 0 |
attr |
OpenPanguMoE.tp_rank |
1 | 0 | 0 |
attr |
OpenPanguMoE.routed_scaling_factor |
1 | 0 | 0 |
attr |
OpenPanguMoE.ep_group |
1 | 0 | 0 |
attr |
OpenPanguMoE.ep_rank |
1 | 0 | 0 |
attr |
OpenPanguMoE.ep_size |
1 | 0 | 0 |
attr |
OpenPanguMoE.is_sequence_parallel |
1 | 0 | 0 |
attr |
OpenPanguMoE.gate |
1 | 0 | 0 |
attr |
OpenPanguMoE.enable_eplb |
1 | 0 | 0 |
attr |
OpenPanguMoE.n_redundant_experts |
1 | 0 | 0 |
attr |
OpenPanguMoE.n_logical_experts |
1 | 0 | 0 |
attr |
OpenPanguMoE.n_physical_experts |
1 | 0 | 0 |
attr |
OpenPanguMoE.n_local_physical_experts |
1 | 0 | 0 |
attr |
OpenPanguMoE.physical_expert_start |
1 | 0 | 0 |
attr |
OpenPanguMoE.physical_expert_end |
1 | 0 | 0 |
attr |
OpenPanguMoE.experts |
1 | 0 | 0 |
attr |
OpenPanguMoE.shared_experts |
1 | 0 | 0 |
meth |
OpenPanguModel.init |
3 | 2 | 0 |
attr |
OpenPanguModel.config |
1 | 0 | 0 |
attr |
OpenPanguModel.num_redundant_experts |
1 | 0 | 0 |
attr |
OpenPanguModel.vocab_size |
1 | 0 | 0 |
attr |
OpenPanguModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
OpenPanguModel.embed_tokens |
1 | 0 | 0 |
attr |
OpenPanguModel.norm |
1 | 0 | 0 |
meth |
OpenPanguMoEModel.init |
3 | 2 | 0 |
attr |
OpenPanguMoEModel.expert_weights |
1 | 0 | 0 |
attr |
OpenPanguMoEModel.num_moe_layers |
1 | 0 | 0 |
attr |
OpenPanguMoEModel.num_expert_groups |
1 | 0 | 0 |
attr |
OpenPanguMoEModel.moe_layers |
1 | 0 | 0 |
attr |
OpenPanguMoEModel.num_logical_experts |
1 | 0 | 0 |
attr |
OpenPanguMoEModel.num_physical_experts |
1 | 0 | 0 |
attr |
OpenPanguMoEModel.num_local_physical_experts |
1 | 0 | 0 |
attr |
OpenPanguMoEModel.n_routed_experts |
1 | 0 | 0 |
attr |
OpenPanguMoEModel.n_shared_experts |
1 | 0 | 0 |
attr |
OpenPanguMoEModel.num_redundant_experts |
1 | 0 | 0 |
vllm.model_executor.models.openpangu_mtp (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
OpenPanguMultiTokenPredictorLayer.config |
1 | 0 | 0 |
attr |
OpenPanguMultiTokenPredictorLayer.enorm |
1 | 0 | 0 |
attr |
OpenPanguMultiTokenPredictorLayer.hnorm |
1 | 0 | 0 |
attr |
OpenPanguMultiTokenPredictorLayer.eh_proj |
1 | 0 | 0 |
attr |
OpenPanguMultiTokenPredictorLayer.shared_head |
1 | 0 | 0 |
attr |
OpenPanguMultiTokenPredictorLayer.mtp_block |
1 | 0 | 0 |
meth |
OpenPanguMultiTokenPredictor.init |
3 | 2 | 0 |
attr |
OpenPanguMultiTokenPredictor.mtp_start_layer_idx |
1 | 0 | 0 |
attr |
OpenPanguMultiTokenPredictor.num_mtp_layers |
1 | 0 | 0 |
attr |
OpenPanguMultiTokenPredictor.layers |
1 | 0 | 0 |
attr |
OpenPanguMultiTokenPredictor.embed_tokens |
1 | 0 | 0 |
attr |
OpenPanguMultiTokenPredictor.logits_processor |
1 | 0 | 0 |
meth |
OpenPanguMTP.init |
3 | 2 | 0 |
meth |
OpenPanguMTP.get_spec_layer |
2 | 0 | 0 |
attr |
OpenPanguMTP.config |
1 | 0 | 0 |
attr |
OpenPanguMTP.model |
1 | 0 | 0 |
vllm.model_executor.models.openpangu_vl (93 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ProjectionSingle.init |
3 | 2 | 0 |
meth |
ProjectionSingle.forward |
2 | 0 | 0 |
attr |
ProjectionSingle.act |
1 | 0 | 0 |
attr |
ProjectionSingle.fc1 |
1 | 0 | 0 |
attr |
OpenPanguVisionAttention.hidden_size_per_attention_head |
1 | 0 | 0 |
attr |
OpenPanguVisionAttention.tp_size |
1 | 0 | 0 |
attr |
OpenPanguVisionAttention.tp_rank |
1 | 0 | 0 |
attr |
OpenPanguVisionAttention.num_attention_heads_per_partition |
1 | 0 | 0 |
attr |
OpenPanguVisionAttention.qkv |
1 | 0 | 0 |
attr |
OpenPanguVisionAttention.proj |
1 | 0 | 0 |
attr |
OpenPanguVisionAttention.attn |
1 | 0 | 0 |
attr |
OpenPanguVisionAttention.apply_rotary_emb |
1 | 0 | 0 |
meth |
OpenPanguVLForConditionalGeneration.init |
3 | 2 | 0 |
meth |
OpenPanguVLForConditionalGeneration._parse_preprocess_params |
2 | 0 | 0 |
meth |
OpenPanguVLForConditionalGeneration._maybe_ignore_quant_config |
2 | 1 | 0 |
meth |
OpenPanguVLForConditionalGeneration._parse_and_validate_image_input |
2 | 1 | 0 |
meth |
OpenPanguVLForConditionalGeneration._parse_and_validate_video_input |
2 | 1 | 0 |
meth |
OpenPanguVLForConditionalGeneration.get_input_embeddings |
3 | 2 | 0 |
meth |
OpenPanguVLForConditionalGeneration._process_image_input |
2 | 1 | 0 |
meth |
OpenPanguVLForConditionalGeneration._process_video_input |
2 | 1 | 0 |
meth |
OpenPanguVLForConditionalGeneration.compute_logits |
3 | 2 | 0 |
attr |
OpenPanguVLForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
OpenPanguVLForConditionalGeneration.config |
1 | 0 | 0 |
attr |
OpenPanguVLForConditionalGeneration.vllm_config |
1 | 0 | 0 |
attr |
OpenPanguVLForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
OpenPanguVLForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
OpenPanguVLForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
normalize |
4 | 0 | 0 |
func |
rescale |
3 | 0 | 0 |
attr |
OpenPanguVisionPatchEmbed.patch_size |
1 | 0 | 0 |
attr |
OpenPanguVisionPatchEmbed.temporal_patch_size |
1 | 0 | 0 |
attr |
OpenPanguVisionPatchEmbed.hidden_size |
1 | 0 | 0 |
attr |
OpenPanguVisionPatchEmbed.input_size |
1 | 0 | 0 |
attr |
OpenPanguVisionPatchEmbed.proj |
1 | 0 | 0 |
attr |
OpenPanguVisionRotaryEmbedding.inv_freq |
1 | 0 | 0 |
meth |
OpenPanguVLProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
OpenPanguVLProcessingInfo.get_hf_processor |
6 | 5 | 0 |
attr |
OpenPanguVisionPatchMerger.hidden_size |
1 | 0 | 0 |
attr |
OpenPanguVisionPatchMerger.ln_q |
1 | 0 | 0 |
attr |
OpenPanguVisionPatchMerger.mlp |
1 | 0 | 0 |
meth |
OpenPanguVisionBlock.init |
9 | 8 | 0 |
attr |
OpenPanguVisionBlock.norm1 |
1 | 0 | 0 |
attr |
OpenPanguVisionBlock.norm2 |
1 | 0 | 0 |
attr |
OpenPanguVisionBlock.attn |
1 | 0 | 0 |
attr |
OpenPanguVisionBlock.mlp |
1 | 0 | 0 |
meth |
OpenPanguVisionTransformer.init |
8 | 4 | 0 |
meth |
OpenPanguVisionTransformer.cal_cos_sin |
2 | 0 | 0 |
meth |
OpenPanguVisionTransformer.get_window_index |
2 | 0 | 0 |
meth |
OpenPanguVisionTransformer.load_weights |
2 | 1 | 0 |
attr |
OpenPanguVisionTransformer.hidden_size |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.num_heads |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.window_size |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.patch_size |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.spatial_merge_size |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.fullatt_block_indexes |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.spatial_merge_unit |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.interleaved |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.out_hidden_size |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.hidden_act |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.attn_backend |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.rotary_pos_emb |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.patch_embed |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.blocks |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.tp_size |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.tp_rank |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.hidden_size_per_attention_head |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.select_layer |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.select_index |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.take_indices |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.final_layernorm |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.merger |
1 | 0 | 0 |
attr |
OpenPanguVisionTransformer.vision_projection |
1 | 0 | 0 |
meth |
OpenPanguVisionMLP.init |
8 | 6 | 0 |
meth |
OpenPanguVisionMLP.forward |
2 | 1 | 0 |
attr |
OpenPanguVisionMLP.hidden_act |
1 | 0 | 0 |
attr |
OpenPanguVisionMLP.down_proj |
1 | 0 | 0 |
attr |
OpenPanguVisionMLP.act_fn |
1 | 0 | 0 |
attr |
OpenPanguVisionMLP.gate_up_proj |
1 | 0 | 0 |
attr |
OpenPanguVisionMLP.up_proj |
1 | 0 | 0 |
vllm.model_executor.models.opt (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OPTDecoderLayer.init |
5 | 4 | 0 |
attr |
OPTDecoderLayer.config |
1 | 0 | 0 |
attr |
OPTDecoderLayer.embed_dim |
1 | 0 | 0 |
attr |
OPTDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
OPTDecoderLayer.do_layer_norm_before |
1 | 0 | 0 |
attr |
OPTDecoderLayer.self_attn_layer_norm |
1 | 0 | 0 |
attr |
OPTDecoderLayer.fc1 |
1 | 0 | 0 |
attr |
OPTDecoderLayer.activation_fn |
1 | 0 | 0 |
attr |
OPTDecoderLayer.fc2 |
1 | 0 | 0 |
attr |
OPTDecoderLayer.final_layer_norm |
1 | 0 | 0 |
meth |
OPTModel.init |
3 | 2 | 0 |
attr |
OPTModel.decoder |
1 | 0 | 0 |
attr |
OPTModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
OPTLearnedPositionalEmbedding.init |
3 | 2 | 0 |
meth |
OPTLearnedPositionalEmbedding.forward |
2 | 1 | 0 |
attr |
OPTLearnedPositionalEmbedding.offset |
1 | 0 | 0 |
meth |
OPTDecoder.init |
5 | 4 | 0 |
attr |
OPTDecoder.config |
1 | 0 | 0 |
attr |
OPTDecoder.max_target_positions |
1 | 0 | 0 |
attr |
OPTDecoder.vocab_size |
1 | 0 | 0 |
attr |
OPTDecoder.embed_tokens |
1 | 0 | 0 |
attr |
OPTDecoder.embed_positions |
1 | 0 | 0 |
attr |
OPTDecoder.project_out |
1 | 0 | 0 |
attr |
OPTDecoder.project_in |
1 | 0 | 0 |
attr |
OPTDecoder.final_layer_norm |
1 | 0 | 0 |
attr |
OPTAttention.embed_dim |
1 | 0 | 0 |
attr |
OPTAttention.num_heads |
1 | 0 | 0 |
attr |
OPTAttention.head_dim |
1 | 0 | 0 |
attr |
OPTAttention.scaling |
1 | 0 | 0 |
attr |
OPTAttention.qkv_proj |
1 | 0 | 0 |
attr |
OPTAttention.out_proj |
1 | 0 | 0 |
attr |
OPTAttention.attn |
1 | 0 | 0 |
meth |
OPTForCausalLM.init |
3 | 2 | 0 |
attr |
OPTForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
OPTForCausalLM.config |
1 | 0 | 0 |
attr |
OPTForCausalLM.quant_config |
1 | 0 | 0 |
attr |
OPTForCausalLM.model |
1 | 0 | 0 |
attr |
OPTForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
OPTForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
OPTForCausalLM.lm_head |
1 | 0 | 0 |
vllm.model_executor.models.orion (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OrionModel.init |
3 | 2 | 0 |
attr |
OrionModel.config |
1 | 0 | 0 |
attr |
OrionModel.vocab_size |
1 | 0 | 0 |
attr |
OrionModel.embed_tokens |
1 | 0 | 0 |
attr |
OrionModel.norm |
1 | 0 | 0 |
attr |
OrionModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
OrionMLP.forward |
2 | 0 | 0 |
attr |
OrionMLP.gate_up_proj |
1 | 0 | 0 |
attr |
OrionMLP.down_proj |
1 | 0 | 0 |
attr |
OrionMLP.act_fn |
1 | 0 | 0 |
attr |
OrionAttention.hidden_size |
1 | 0 | 0 |
attr |
OrionAttention.total_num_heads |
1 | 0 | 0 |
attr |
OrionAttention.num_heads |
1 | 0 | 0 |
attr |
OrionAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
OrionAttention.num_kv_heads |
1 | 0 | 0 |
attr |
OrionAttention.head_dim |
1 | 0 | 0 |
attr |
OrionAttention.q_size |
1 | 0 | 0 |
attr |
OrionAttention.kv_size |
1 | 0 | 0 |
attr |
OrionAttention.scaling |
1 | 0 | 0 |
attr |
OrionAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
OrionAttention.qkv_proj |
1 | 0 | 0 |
attr |
OrionAttention.o_proj |
1 | 0 | 0 |
attr |
OrionAttention.rotary_emb |
1 | 0 | 0 |
attr |
OrionAttention.attn |
1 | 0 | 0 |
attr |
OrionDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
OrionDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
OrionDecoderLayer.mlp |
1 | 0 | 0 |
attr |
OrionDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
OrionDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
OrionForCausalLM.init |
3 | 2 | 0 |
attr |
OrionForCausalLM.config |
1 | 0 | 0 |
attr |
OrionForCausalLM.quant_config |
1 | 0 | 0 |
attr |
OrionForCausalLM.model |
1 | 0 | 0 |
attr |
OrionForCausalLM.lm_head |
1 | 0 | 0 |
attr |
OrionForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
OrionForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.ouro (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OuroModel.init |
4 | 3 | 0 |
attr |
OuroModel.config |
1 | 0 | 0 |
attr |
OuroModel.quant_config |
1 | 0 | 0 |
attr |
OuroModel.vocab_size |
1 | 0 | 0 |
attr |
OuroModel.embed_tokens |
1 | 0 | 0 |
attr |
OuroModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
OuroModel.norm |
1 | 0 | 0 |
attr |
OuroModel.early_exit_gate |
1 | 0 | 0 |
attr |
OuroModel.total_ut_steps |
1 | 0 | 0 |
meth |
OuroMLP.forward |
2 | 0 | 0 |
attr |
OuroMLP.gate_up_proj |
1 | 0 | 0 |
attr |
OuroMLP.down_proj |
1 | 0 | 0 |
attr |
OuroMLP.act_fn |
1 | 0 | 0 |
meth |
OuroForCausalLM.init |
3 | 2 | 0 |
attr |
OuroForCausalLM.config |
1 | 0 | 0 |
attr |
OuroForCausalLM.quant_config |
1 | 0 | 0 |
attr |
OuroForCausalLM.model |
1 | 0 | 0 |
attr |
OuroForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
OuroForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
OuroForCausalLM.lm_head |
1 | 0 | 0 |
attr |
OuroAttention.hidden_size |
1 | 0 | 0 |
attr |
OuroAttention.total_num_heads |
1 | 0 | 0 |
attr |
OuroAttention.num_heads |
1 | 0 | 0 |
attr |
OuroAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
OuroAttention.num_kv_heads |
1 | 0 | 0 |
attr |
OuroAttention.head_dim |
1 | 0 | 0 |
attr |
OuroAttention.q_size |
1 | 0 | 0 |
attr |
OuroAttention.kv_size |
1 | 0 | 0 |
attr |
OuroAttention.scaling |
1 | 0 | 0 |
attr |
OuroAttention.dual_chunk_attention_config |
1 | 0 | 0 |
attr |
OuroAttention.qkv_proj |
1 | 0 | 0 |
attr |
OuroAttention.o_proj |
1 | 0 | 0 |
attr |
OuroAttention.rotary_emb |
1 | 0 | 0 |
attr |
OuroAttention.attn |
1 | 0 | 0 |
attr |
OuroDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
OuroDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
OuroDecoderLayer.mlp |
1 | 0 | 0 |
attr |
OuroDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
OuroDecoderLayer.input_layernorm_2 |
1 | 0 | 0 |
attr |
OuroDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
OuroDecoderLayer.post_attention_layernorm_2 |
1 | 0 | 0 |
vllm.model_executor.models.ovis (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
st_argmax |
3 | 2 | 0 |
meth |
VisualTokenizer.init |
4 | 3 | 0 |
attr |
VisualTokenizer.config |
1 | 0 | 0 |
attr |
VisualTokenizer.backbone |
1 | 0 | 0 |
attr |
VisualTokenizer.head |
1 | 0 | 0 |
meth |
Ovis.init |
3 | 2 | 0 |
attr |
Ovis.image_pad_token_id |
1 | 0 | 0 |
attr |
Ovis.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Ovis.llm |
1 | 0 | 0 |
attr |
Ovis.visual_tokenizer |
1 | 0 | 0 |
attr |
Ovis.vte |
1 | 0 | 0 |
meth |
OvisProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
VisualEmbedding.init |
3 | 0 | 0 |
prop |
VisualEmbedding.device |
1 | 0 | 0 |
prop |
VisualEmbedding.dtype |
1 | 0 | 0 |
vllm.model_executor.models.ovis2_5 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ovis2_5.init |
3 | 2 | 0 |
attr |
Ovis2_5.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Ovis2_5.llm |
1 | 0 | 0 |
attr |
Ovis2_5.visual_tokenizer |
1 | 0 | 0 |
attr |
Ovis2_5.vte |
1 | 0 | 0 |
meth |
VisualTokenizer.init |
5 | 4 | 0 |
meth |
VisualTokenizer._init_backbone |
4 | 3 | 0 |
attr |
VisualTokenizer.config |
1 | 0 | 0 |
attr |
VisualTokenizer.vit |
1 | 0 | 0 |
attr |
VisualTokenizer.head |
1 | 0 | 0 |
meth |
Ovis2_5ProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Ovis2_5ProcessingInfo.get_hf_processor |
2 | 0 | 0 |
vllm.model_executor.models.paddleocr_vl (74 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
SiglipAttention.tp_size |
1 | 0 | 0 |
attr |
SiglipAttention.tp_rank |
1 | 0 | 0 |
attr |
SiglipAttention.hidden_size_per_attention_head |
1 | 0 | 0 |
attr |
SiglipAttention.num_attention_heads_per_partition |
1 | 0 | 0 |
attr |
SiglipAttention.qkv_proj |
1 | 0 | 0 |
attr |
SiglipAttention.out_proj |
1 | 0 | 0 |
attr |
SiglipAttention.attn |
1 | 0 | 0 |
attr |
SiglipAttention.apply_rotary_emb |
1 | 0 | 0 |
meth |
PaddleOCRVLForConditionalGeneration.init |
3 | 2 | 0 |
meth |
PaddleOCRVLForConditionalGeneration.forward |
6 | 4 | 0 |
meth |
PaddleOCRVLForConditionalGeneration.embed_multimodal |
2 | 1 | 0 |
attr |
PaddleOCRVLForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
PaddleOCRVLForConditionalGeneration.config |
1 | 0 | 0 |
attr |
PaddleOCRVLForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
PaddleOCRVLForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
PaddleOCRVLForConditionalGeneration.mlp_AR |
1 | 0 | 0 |
attr |
PaddleOCRVLForConditionalGeneration.language_model |
1 | 0 | 0 |
func |
smart_resize |
6 | 5 | 0 |
meth |
PaddleOCRVLProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
PaddleOCRVLProcessingInfo.get_hf_processor |
2 | 1 | 0 |
meth |
PaddleOCRVLProcessingInfo.get_image_processor |
2 | 1 | 0 |
meth |
PaddleOCRVLProcessingInfo.get_supported_mm_limits |
1 | 0 | 0 |
meth |
SiglipVisionEmbeddings.init |
2 | 1 | 0 |
meth |
SiglipVisionEmbeddings.fetch_position_embedding_lfu_cache |
5 | 4 | 0 |
meth |
SiglipVisionEmbeddings.forward |
5 | 4 | 0 |
attr |
SiglipVisionEmbeddings.config |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.embed_dim |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.image_size |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.patch_size |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.patch_embedding |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.num_patches |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.num_positions |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.cache_position_embedding |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.cache_position_count |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.position_embedding |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.packing_position_embedding |
1 | 0 | 0 |
meth |
SiglipEncoder.init |
4 | 3 | 0 |
meth |
SiglipEncoder.flatten_list |
2 | 0 | 0 |
meth |
SiglipEncoder.forward |
6 | 5 | 0 |
attr |
SiglipEncoder.config |
1 | 0 | 0 |
attr |
SiglipEncoder.attn_backend |
1 | 0 | 0 |
attr |
SiglipEncoder.layers |
1 | 0 | 0 |
attr |
SiglipEncoder.rotary_pos_emb |
1 | 0 | 0 |
func |
all_gather_interleave |
4 | 3 | 0 |
meth |
SigLIPRotaryEmbedding.rope_init |
1 | 0 | 0 |
attr |
SigLIPRotaryEmbedding.dim |
1 | 0 | 0 |
attr |
SigLIPRotaryEmbedding.theta |
1 | 0 | 0 |
meth |
Projector.init |
4 | 3 | 0 |
attr |
Projector.text_config |
1 | 0 | 0 |
attr |
Projector.vision_config |
1 | 0 | 0 |
attr |
Projector.merge_kernel_size |
1 | 0 | 0 |
attr |
Projector.hidden_size |
1 | 0 | 0 |
attr |
Projector.pre_norm |
1 | 0 | 0 |
attr |
Projector.linear_1 |
1 | 0 | 0 |
attr |
Projector.act |
1 | 0 | 0 |
attr |
Projector.linear_2 |
1 | 0 | 0 |
meth |
SiglipEncoderLayer.init |
4 | 3 | 0 |
attr |
SiglipEncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
SiglipEncoderLayer.layer_norm1 |
1 | 0 | 0 |
attr |
SiglipEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
SiglipEncoderLayer.layer_norm2 |
1 | 0 | 0 |
attr |
SiglipEncoderLayer.mlp |
1 | 0 | 0 |
meth |
SiglipVisionTransformer.init |
4 | 3 | 0 |
attr |
SiglipVisionTransformer.config |
1 | 0 | 0 |
attr |
SiglipVisionTransformer.embeddings |
1 | 0 | 0 |
attr |
SiglipVisionTransformer.encoder |
1 | 0 | 0 |
attr |
SiglipVisionTransformer.post_layernorm |
1 | 0 | 0 |
meth |
SiglipVisionModel.init |
4 | 2 | 0 |
meth |
SiglipVisionModel.forward |
6 | 5 | 0 |
attr |
SiglipVisionModel.vision_model |
1 | 0 | 0 |
attr |
SiglipVisionModel.quant_config |
1 | 0 | 0 |
vllm.model_executor.models.paligemma (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PaliGemmaForConditionalGeneration.init |
3 | 2 | 0 |
attr |
PaliGemmaForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
PaliGemmaForConditionalGeneration.config |
1 | 0 | 0 |
attr |
PaliGemmaForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
PaliGemmaForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
PaliGemmaForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
PaliGemmaForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
PaliGemmaForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
PaliGemmaForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
PaliGemmaMultiModalProjector.init |
3 | 2 | 0 |
attr |
PaliGemmaMultiModalProjector.linear |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
PaliGemmaProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
PaliGemmaProcessingInfo.get_vision_encoder_info |
1 | 0 | 0 |
vllm.model_executor.models.parakeet (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
ParakeetProjection.norm |
1 | 0 | 0 |
attr |
ParakeetProjection.linear1 |
1 | 0 | 0 |
attr |
ParakeetProjection.activation |
1 | 0 | 0 |
attr |
ParakeetProjection.linear2 |
1 | 0 | 0 |
meth |
ParakeetExtractor.call |
4 | 1 | 0 |
attr |
ParakeetExtractor.config |
1 | 0 | 0 |
attr |
ProjectedParakeet.config |
1 | 0 | 0 |
attr |
ProjectedParakeet.encoder |
1 | 0 | 0 |
attr |
ProjectedParakeet.projection |
1 | 0 | 0 |
vllm.model_executor.models.persimmon (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PersimmonAttention.init |
5 | 4 | 0 |
attr |
PersimmonAttention.config |
1 | 0 | 0 |
attr |
PersimmonAttention.hidden_size |
1 | 0 | 0 |
attr |
PersimmonAttention.total_num_heads |
1 | 0 | 0 |
attr |
PersimmonAttention.num_heads |
1 | 0 | 0 |
attr |
PersimmonAttention.head_dim |
1 | 0 | 0 |
attr |
PersimmonAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
PersimmonAttention.is_causal |
1 | 0 | 0 |
attr |
PersimmonAttention.query_key_value |
1 | 0 | 0 |
attr |
PersimmonAttention.dense |
1 | 0 | 0 |
attr |
PersimmonAttention.is_qk_layernorm |
1 | 0 | 0 |
attr |
PersimmonAttention.rotary_emb |
1 | 0 | 0 |
attr |
PersimmonAttention.scaling |
1 | 0 | 0 |
attr |
PersimmonAttention.attn |
1 | 0 | 0 |
attr |
PersimmonAttention.q_layernorm |
1 | 0 | 0 |
attr |
PersimmonAttention.k_layernorm |
1 | 0 | 0 |
meth |
PersimmonModel.init |
3 | 2 | 0 |
attr |
PersimmonModel.vocab_size |
1 | 0 | 0 |
attr |
PersimmonModel.config |
1 | 0 | 0 |
attr |
PersimmonModel.embed_tokens |
1 | 0 | 0 |
attr |
PersimmonModel.final_layernorm |
1 | 0 | 0 |
attr |
PersimmonModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
PersimmonMLP.init |
4 | 3 | 0 |
meth |
PersimmonMLP.forward |
2 | 1 | 0 |
attr |
PersimmonMLP.dense_h_to_4h |
1 | 0 | 0 |
attr |
PersimmonMLP.dense_4h_to_h |
1 | 0 | 0 |
attr |
PersimmonMLP.act |
1 | 0 | 0 |
meth |
PersimmonForCausalLM.init |
3 | 2 | 0 |
meth |
PersimmonForCausalLM.forward |
5 | 4 | 0 |
attr |
PersimmonForCausalLM.config |
1 | 0 | 0 |
attr |
PersimmonForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
PersimmonForCausalLM.model |
1 | 0 | 0 |
attr |
PersimmonForCausalLM.lm_head |
1 | 0 | 0 |
attr |
PersimmonForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
PersimmonForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
PersimmonDecoderLayer.init |
5 | 4 | 0 |
attr |
PersimmonDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
PersimmonDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
PersimmonDecoderLayer.mlp |
1 | 0 | 0 |
attr |
PersimmonDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
PersimmonDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.phi (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PhiLayer.init |
5 | 4 | 0 |
attr |
PhiLayer.input_layernorm |
1 | 0 | 0 |
attr |
PhiLayer.self_attn |
1 | 0 | 0 |
attr |
PhiLayer.mlp |
1 | 0 | 0 |
meth |
PhiMLP.init |
4 | 3 | 0 |
meth |
PhiMLP.forward |
2 | 0 | 0 |
attr |
PhiMLP.fc1 |
1 | 0 | 0 |
attr |
PhiMLP.fc2 |
1 | 0 | 0 |
attr |
PhiMLP.act |
1 | 0 | 0 |
meth |
PhiForCausalLM.init |
3 | 2 | 0 |
attr |
PhiForCausalLM.config |
1 | 0 | 0 |
attr |
PhiForCausalLM.quant_config |
1 | 0 | 0 |
attr |
PhiForCausalLM.model |
1 | 0 | 0 |
attr |
PhiForCausalLM.lm_head |
1 | 0 | 0 |
attr |
PhiForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
PhiForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
PhiModel.init |
3 | 2 | 0 |
attr |
PhiModel.config |
1 | 0 | 0 |
attr |
PhiModel.quant_config |
1 | 0 | 0 |
attr |
PhiModel.embed_tokens |
1 | 0 | 0 |
attr |
PhiModel.final_layernorm |
1 | 0 | 0 |
attr |
PhiModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
PhiAttention.init |
5 | 4 | 0 |
attr |
PhiAttention.hidden_size |
1 | 0 | 0 |
attr |
PhiAttention.head_size |
1 | 0 | 0 |
attr |
PhiAttention.num_heads |
1 | 0 | 0 |
attr |
PhiAttention.qkv_proj |
1 | 0 | 0 |
attr |
PhiAttention.dense |
1 | 0 | 0 |
attr |
PhiAttention.rotary_emb |
1 | 0 | 0 |
attr |
PhiAttention.attn |
1 | 0 | 0 |
vllm.model_executor.models.phi3v (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Phi3HDImageEmbedding.hd_feature_transform |
3 | 0 | 0 |
meth |
Phi3HDImageEmbedding.reshape_hd_patches_2x2merge |
4 | 0 | 0 |
meth |
Phi3HDImageEmbedding.add_image_newline |
2 | 0 | 0 |
attr |
Phi3HDImageEmbedding.img_processor |
1 | 0 | 0 |
attr |
Phi3HDImageEmbedding.num_img_tokens |
1 | 0 | 0 |
attr |
Phi3HDImageEmbedding.image_dim_out |
1 | 0 | 0 |
attr |
Phi3HDImageEmbedding.use_hd_transform |
1 | 0 | 0 |
attr |
Phi3HDImageEmbedding.with_learnable_separator |
1 | 0 | 0 |
attr |
Phi3HDImageEmbedding.hd_transform_order |
1 | 0 | 0 |
attr |
Phi3HDImageEmbedding.glb_GN |
1 | 0 | 0 |
attr |
Phi3HDImageEmbedding.sub_GN |
1 | 0 | 0 |
attr |
Phi3HDImageEmbedding.img_projection |
1 | 0 | 0 |
attr |
Phi3HDImageEmbedding.type_feature |
1 | 0 | 0 |
attr |
CLIP_VIT_LARGE_PATCH14_336_CONFIG |
1 | 0 | 0 |
meth |
Phi3VForCausalLM.init |
3 | 2 | 0 |
meth |
Phi3VForCausalLM.forward |
6 | 5 | 0 |
attr |
Phi3VForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Phi3VForCausalLM.config |
1 | 0 | 0 |
attr |
Phi3VForCausalLM.multimodal_config |
1 | 0 | 0 |
attr |
Phi3VForCausalLM.image_token_id |
1 | 0 | 0 |
attr |
Phi3VForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Phi3VForCausalLM.embed_tokens |
1 | 0 | 0 |
attr |
Phi3VForCausalLM.vision_embed_tokens |
1 | 0 | 0 |
attr |
Phi3VForCausalLM.language_model |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.phi4mm (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Phi4MMProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
Phi4MMProcessingInfo._find_target_aspect_ratio |
6 | 5 | 0 |
meth |
Phi4MMProcessingInfo._compute_num_image_tokens |
7 | 6 | 0 |
meth |
Phi4MMImageEncoder.get_img_features |
3 | 2 | 0 |
attr |
Phi4MMImageEncoder.img_processor |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.num_img_tokens |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.base_feat_height_target |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.image_dim_out |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.img_sizes |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.image_attention_mask |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.use_hd_transform |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.with_learnable_separator |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.hd_transform_order |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.freeze_img_processor |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.crop_size |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.image_token_compression_cls |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.image_token_compression |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.base_feat_height_reduction |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.glb_GN |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.sub_GN |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.img_projection |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.vocab_size |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.img_features |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.use_out_place_operations |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.layer_idx |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.type_feature |
1 | 0 | 0 |
attr |
Phi4MMImageEncoder.img_processor_padding |
1 | 0 | 0 |
func |
get_navit_vision_model |
3 | 1 | 0 |
meth |
Phi4MMForCausalLM.init |
3 | 2 | 0 |
attr |
Phi4MMForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Phi4MMForCausalLM.config |
1 | 0 | 0 |
attr |
Phi4MMForCausalLM.multimodal_config |
1 | 0 | 0 |
attr |
Phi4MMForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Phi4MMForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Phi4MMForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Phi4MMForCausalLM.vision_encoder |
1 | 0 | 0 |
attr |
Phi4MMForCausalLM.embed_tokens_extend |
1 | 0 | 0 |
attr |
Phi4MMForCausalLM.model |
1 | 0 | 0 |
func |
cat_with_pad |
4 | 0 | 0 |
vllm.model_executor.models.phi4mm_audio (47 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
ConformerEncoder.num_blocks |
1 | 0 | 0 |
attr |
ConformerEncoder.num_lang |
1 | 0 | 0 |
attr |
ConformerEncoder.kernel_size |
1 | 0 | 0 |
attr |
ConformerEncoder.num_heads_k |
1 | 0 | 0 |
attr |
ConformerEncoder.encoders |
1 | 0 | 0 |
attr |
ConformerEncoder.extra_layer_output_idx |
1 | 0 | 0 |
meth |
WindowQformer.init |
9 | 8 | 0 |
attr |
WindowQformer.decoders |
1 | 0 | 0 |
attr |
WindowQformer.queries |
1 | 0 | 0 |
attr |
WindowQformer.after_norm |
1 | 0 | 0 |
attr |
WindowQformer.window_size |
1 | 0 | 0 |
meth |
TransformerEncoderBase.forward |
1 | 1 | 1 |
attr |
TransformerEncoderBase.input_size |
1 | 0 | 0 |
attr |
TransformerEncoderBase.input_layer |
1 | 0 | 0 |
attr |
TransformerEncoderBase.chunk_size |
1 | 0 | 0 |
attr |
TransformerEncoderBase.left_chunk |
1 | 0 | 0 |
attr |
TransformerEncoderBase.attention_dim |
1 | 0 | 0 |
attr |
TransformerEncoderBase.num_heads |
1 | 0 | 0 |
attr |
TransformerEncoderBase.attention_group_size |
1 | 0 | 0 |
attr |
TransformerEncoderBase.time_reduction |
1 | 0 | 0 |
attr |
TransformerEncoderBase.nemo_conv_settings |
1 | 0 | 0 |
attr |
TransformerEncoderBase.encoder_embedding_config |
1 | 0 | 0 |
attr |
TransformerEncoderBase.pos_emb |
1 | 0 | 0 |
attr |
TransformerEncoderBase.relative_attention_bias_type |
1 | 0 | 0 |
attr |
TransformerEncoderBase.encoder_embedding |
1 | 0 | 0 |
attr |
TransformerEncoderBase.embed |
1 | 0 | 0 |
attr |
TransformerEncoderBase.relative_attention_bias_layer |
1 | 0 | 0 |
attr |
ConformerEncoderLayer.feed_forward_in |
1 | 0 | 0 |
attr |
ConformerEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
ConformerEncoderLayer.conv |
1 | 0 | 0 |
attr |
ConformerEncoderLayer.feed_forward_out |
1 | 0 | 0 |
attr |
ConformerEncoderLayer.layer_norm_att |
1 | 0 | 0 |
attr |
ConformerEncoderLayer.layer_norm |
1 | 0 | 0 |
meth |
AudioEmbedding.init |
3 | 3 | 1 |
attr |
AudioEmbedding.config |
1 | 0 | 0 |
attr |
AudioEmbedding.layer_idx |
1 | 0 | 0 |
attr |
AudioEmbedding.audio_dim_out |
1 | 0 | 0 |
attr |
AudioEmbedding.audio_dim_in |
1 | 0 | 0 |
attr |
AudioEmbedding.freeze_audio_processor |
1 | 0 | 0 |
attr |
AudioEmbedding.downsample_rate |
1 | 0 | 0 |
attr |
AudioEmbedding.vocab_size |
1 | 0 | 0 |
attr |
AudioEmbedding.input_embeds |
1 | 0 | 0 |
attr |
AudioEmbedding.audio_embed_sizes |
1 | 0 | 0 |
attr |
AudioEmbedding.encoder |
1 | 0 | 0 |
attr |
AudioEmbedding.qformer |
1 | 0 | 0 |
attr |
AudioEmbedding.conv_ds |
1 | 0 | 0 |
attr |
AudioEmbedding.audio_projection |
1 | 0 | 0 |
attr |
AudioEmbedding.linear_downsample_rate |
1 | 0 | 0 |
attr |
AudioEmbedding.audio_projection_for_vision |
1 | 0 | 0 |
vllm.model_executor.models.phi4mm_utils (76 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
NemoConvSubsampling.subsampling_factor |
1 | 0 | 0 |
attr |
NemoConvSubsampling.is_causal |
1 | 0 | 0 |
attr |
NemoConvSubsampling.subsampling_causal_cond |
1 | 0 | 0 |
attr |
NemoConvSubsampling.subsampling_conv_chunking_factor |
1 | 0 | 0 |
attr |
NemoConvSubsampling.conv |
1 | 0 | 0 |
attr |
NemoConvSubsampling.out |
1 | 0 | 0 |
attr |
NemoConvSubsampling.conv2d_subsampling |
1 | 0 | 0 |
attr |
DepthWiseSeperableConv1d.dw_conv |
1 | 0 | 0 |
attr |
DepthWiseSeperableConv1d.depthwise_seperable_out_channel |
1 | 0 | 0 |
attr |
DepthWiseSeperableConv1d.pw_conv |
1 | 0 | 0 |
attr |
ConvModule.layer_norm |
1 | 0 | 0 |
attr |
ConvModule.input_dim |
1 | 0 | 0 |
attr |
ConvModule.ext_pw_out_channel |
1 | 0 | 0 |
attr |
ConvModule.ext_pw_kernel_size |
1 | 0 | 0 |
attr |
ConvModule.depthwise_seperable_out_channel |
1 | 0 | 0 |
attr |
ConvModule.glu_type |
1 | 0 | 0 |
attr |
ConvModule.bias_in_glu |
1 | 0 | 0 |
attr |
ConvModule.linear_glu_in_convm |
1 | 0 | 0 |
attr |
ConvModule.causal |
1 | 0 | 0 |
attr |
ConvModule.batch_norm |
1 | 0 | 0 |
attr |
ConvModule.kernel_size |
1 | 0 | 0 |
attr |
ConvModule.act |
1 | 0 | 0 |
attr |
ConvModule.dropout |
1 | 0 | 0 |
attr |
ConvModule.export |
1 | 0 | 0 |
attr |
ConvModule.dw_sep_conv_1d |
1 | 0 | 0 |
attr |
ConvModule.bn_layer |
1 | 0 | 0 |
attr |
ConvModule.ln2 |
1 | 0 | 0 |
attr |
GLU.dim |
1 | 0 | 0 |
attr |
GLU.act_fn |
1 | 0 | 0 |
attr |
MeanVarianceNormLayer.input_size |
1 | 0 | 0 |
attr |
MeanVarianceNormLayer.global_mean |
1 | 0 | 0 |
attr |
MeanVarianceNormLayer.global_invstd |
1 | 0 | 0 |
attr |
T5RelativeAttentionLogitBias.num_heads |
1 | 0 | 0 |
attr |
T5RelativeAttentionLogitBias.num_buckets |
1 | 0 | 0 |
attr |
T5RelativeAttentionLogitBias.max_distance |
1 | 0 | 0 |
attr |
T5RelativeAttentionLogitBias.symmetric |
1 | 0 | 0 |
attr |
T5RelativeAttentionLogitBias.bias_values |
1 | 0 | 0 |
attr |
GLUPointWiseConv.glu_type |
1 | 0 | 0 |
attr |
GLUPointWiseConv.output_dim |
1 | 0 | 0 |
attr |
GLUPointWiseConv.bias_in_glu |
1 | 0 | 0 |
attr |
GLUPointWiseConv.glu_act |
1 | 0 | 0 |
attr |
GLUPointWiseConv.ext_pw_conv_1d |
1 | 0 | 0 |
attr |
GLUPointWiseConv.b1 |
1 | 0 | 0 |
attr |
GLUPointWiseConv.b2 |
1 | 0 | 0 |
attr |
GLULinear.linear |
1 | 0 | 0 |
attr |
GLULinear.glu_act |
1 | 0 | 0 |
attr |
MultiHeadedAttention.d_k |
1 | 0 | 0 |
attr |
MultiHeadedAttention.linear_q |
1 | 0 | 0 |
attr |
MultiHeadedAttention.linear_k |
1 | 0 | 0 |
attr |
MultiHeadedAttention.linear_v |
1 | 0 | 0 |
attr |
MultiHeadedAttention.linear_out |
1 | 0 | 0 |
attr |
MultiHeadedAttention.attn |
1 | 0 | 0 |
attr |
MultiHeadedAttention.dropout |
1 | 0 | 0 |
attr |
MultiHeadedAttention.dropout_rate |
1 | 0 | 0 |
attr |
MultiHeadedAttention.use_pt_scaled_dot_product_attention |
1 | 0 | 0 |
attr |
MultiHeadedAttention.quant_q |
1 | 0 | 0 |
attr |
MultiHeadedAttention.quant_x |
1 | 0 | 0 |
attr |
MultiHeadedAttention.dequant |
1 | 0 | 0 |
attr |
MultiHeadedAttention.ffunc |
1 | 0 | 0 |
meth |
MultiSequential.forward |
2 | 1 | 0 |
attr |
BlockBase.input_size |
1 | 0 | 0 |
attr |
BlockBase.output_size |
1 | 0 | 0 |
attr |
FeedForward.d_model |
1 | 0 | 0 |
attr |
FeedForward.d_inner |
1 | 0 | 0 |
attr |
FeedForward.layer_norm |
1 | 0 | 0 |
attr |
FeedForward.net |
1 | 0 | 0 |
meth |
CausalConv2D.init |
12 | 10 | 0 |
attr |
AbsolutePositionalEncoding.d_model |
1 | 0 | 0 |
attr |
AbsolutePositionalEncoding.xscale |
1 | 0 | 0 |
attr |
AbsolutePositionalEncoding.dropout |
1 | 0 | 0 |
attr |
AbsolutePositionalEncoding.pe |
1 | 0 | 0 |
meth |
CausalConv1D.init |
12 | 10 | 0 |
attr |
CausalConv1D.cache_drop_size |
1 | 0 | 0 |
attr |
AttModule.export_mode |
1 | 0 | 0 |
vllm.model_executor.models.phimoe (93 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PhiMoEModel.init |
3 | 2 | 0 |
attr |
PhiMoEModel.vocab_size |
1 | 0 | 0 |
attr |
PhiMoEModel.config |
1 | 0 | 0 |
attr |
PhiMoEModel.quant_config |
1 | 0 | 0 |
attr |
PhiMoEModel.embed_tokens |
1 | 0 | 0 |
attr |
PhiMoEModel.norm |
1 | 0 | 0 |
attr |
PhiMoEModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
PhiMoEDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
PhiMoEDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
PhiMoEDecoderLayer.block_sparse_moe |
1 | 0 | 0 |
attr |
PhiMoEDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
PhiMoEDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
mp.forward |
7 | 5 | 0 |
meth |
mp.backward |
3 | 1 | 0 |
meth |
PhiMoE.init |
9 | 8 | 0 |
attr |
PhiMoE.hidden_size |
1 | 0 | 0 |
attr |
PhiMoE.gate |
1 | 0 | 0 |
attr |
PhiMoE.experts |
1 | 0 | 0 |
attr |
PhiMoEAttention.hidden_size |
1 | 0 | 0 |
attr |
PhiMoEAttention.total_num_heads |
1 | 0 | 0 |
attr |
PhiMoEAttention.num_heads |
1 | 0 | 0 |
attr |
PhiMoEAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
PhiMoEAttention.num_kv_heads |
1 | 0 | 0 |
attr |
PhiMoEAttention.head_dim |
1 | 0 | 0 |
attr |
PhiMoEAttention.q_size |
1 | 0 | 0 |
attr |
PhiMoEAttention.kv_size |
1 | 0 | 0 |
attr |
PhiMoEAttention.scaling |
1 | 0 | 0 |
attr |
PhiMoEAttention.qkv_proj |
1 | 0 | 0 |
attr |
PhiMoEAttention.o_proj |
1 | 0 | 0 |
attr |
PhiMoEAttention.rotary_emb |
1 | 0 | 0 |
attr |
PhiMoEAttention.attn |
1 | 0 | 0 |
func |
sparsemixer |
3 | 0 | 0 |
meth |
PhiMoEForCausalLM.init |
3 | 2 | 0 |
attr |
PhiMoEForCausalLM.config |
1 | 0 | 0 |
attr |
PhiMoEForCausalLM.quant_config |
1 | 0 | 0 |
attr |
PhiMoEForCausalLM.model |
1 | 0 | 0 |
attr |
PhiMoEForCausalLM.lm_head |
1 | 0 | 0 |
attr |
PhiMoEForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
PhiMoEForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
func |
phimoe_routing_function |
5 | 4 | 0 |
meth |
PhiMoEConfig.init |
28 | 0 | 0 |
attr |
PhiMoEConfig.vocab_size |
1 | 0 | 0 |
attr |
PhiMoEConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
PhiMoEConfig.hidden_size |
1 | 0 | 0 |
attr |
PhiMoEConfig.intermediate_size |
1 | 0 | 0 |
attr |
PhiMoEConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PhiMoEConfig.num_attention_heads |
1 | 0 | 0 |
attr |
PhiMoEConfig.sliding_window |
1 | 0 | 0 |
attr |
PhiMoEConfig.attention_bias |
1 | 0 | 0 |
attr |
PhiMoEConfig.lm_head_bias |
1 | 0 | 0 |
attr |
PhiMoEConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
PhiMoEConfig.head_dim |
1 | 0 | 0 |
attr |
PhiMoEConfig.hidden_act |
1 | 0 | 0 |
attr |
PhiMoEConfig.initializer_range |
1 | 0 | 0 |
attr |
PhiMoEConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
PhiMoEConfig.use_cache |
1 | 0 | 0 |
attr |
PhiMoEConfig.attention_dropout |
1 | 0 | 0 |
attr |
PhiMoEConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
PhiMoEConfig.num_local_experts |
1 | 0 | 0 |
attr |
PhiMoEConfig.output_router_logits |
1 | 0 | 0 |
attr |
PhiMoEConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
PhiMoEConfig.router_jitter_noise |
1 | 0 | 0 |
vllm.model_executor.models.pixtral (66 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Transformer.init |
2 | 1 | 0 |
attr |
Transformer.layers |
1 | 0 | 0 |
attr |
PixtralHFMLP.gate_up_proj |
1 | 0 | 0 |
attr |
PixtralHFMLP.down_proj |
1 | 0 | 0 |
attr |
PixtralHFMLP.act_and_mul |
1 | 0 | 0 |
meth |
TransformerBlock.init |
2 | 1 | 0 |
attr |
TransformerBlock.attention |
1 | 0 | 0 |
attr |
TransformerBlock.feed_forward |
1 | 0 | 0 |
attr |
TransformerBlock.attention_norm |
1 | 0 | 0 |
attr |
TransformerBlock.ffn_norm |
1 | 0 | 0 |
meth |
VisionTransformer.init |
2 | 1 | 0 |
attr |
VisionTransformer.args |
1 | 0 | 0 |
attr |
VisionTransformer.patch_conv |
1 | 0 | 0 |
attr |
VisionTransformer.ln_pre |
1 | 0 | 0 |
attr |
VisionTransformer.transformer |
1 | 0 | 0 |
meth |
VisionLanguageAdapter.init |
3 | 2 | 0 |
attr |
VisionLanguageAdapter.w_in |
1 | 0 | 0 |
attr |
VisionLanguageAdapter.gelu |
1 | 0 | 0 |
attr |
VisionLanguageAdapter.w_out |
1 | 0 | 0 |
meth |
PixtralForConditionalGeneration.init |
3 | 2 | 0 |
meth |
PixtralForConditionalGeneration.load_weights |
2 | 1 | 0 |
attr |
PixtralForConditionalGeneration.config |
1 | 0 | 0 |
attr |
PixtralForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
PixtralForConditionalGeneration.vision_args |
1 | 0 | 0 |
attr |
PixtralForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
PixtralForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
PixtralForConditionalGeneration.vision_encoder |
1 | 0 | 0 |
attr |
PixtralForConditionalGeneration.pre_mm_projector_norm |
1 | 0 | 0 |
attr |
PixtralForConditionalGeneration.patch_merger |
1 | 0 | 0 |
attr |
PixtralForConditionalGeneration.vision_language_adapter |
1 | 0 | 0 |
meth |
PixtralProcessorAdapter.call |
5 | 4 | 0 |
attr |
PixtralProcessorAdapter.tokenizer |
1 | 0 | 0 |
meth |
Attention.init |
2 | 1 | 0 |
attr |
Attention.args |
1 | 0 | 0 |
attr |
Attention.n_heads |
1 | 0 | 0 |
attr |
Attention.head_dim |
1 | 0 | 0 |
attr |
Attention.wq |
1 | 0 | 0 |
attr |
Attention.wk |
1 | 0 | 0 |
attr |
Attention.wv |
1 | 0 | 0 |
attr |
Attention.wo |
1 | 0 | 0 |
attr |
PixtralHFVisionModel.config |
1 | 0 | 0 |
attr |
PixtralHFVisionModel.patch_conv |
1 | 0 | 0 |
attr |
PixtralHFVisionModel.ln_pre |
1 | 0 | 0 |
attr |
PixtralHFVisionModel.transformer |
1 | 0 | 0 |
attr |
PixtralHFVisionModel.dtype |
1 | 0 | 0 |
attr |
PixtralHFVisionModel.device |
1 | 0 | 0 |
attr |
PixtralHFVisionModel.patch_positional_embedding |
1 | 0 | 0 |
attr |
PatchMerger.spatial_merge_size |
1 | 0 | 0 |
attr |
PatchMerger.mlp_input_dim |
1 | 0 | 0 |
attr |
PatchMerger.merging_layer |
1 | 0 | 0 |
meth |
FeedForward.init |
2 | 1 | 0 |
attr |
FeedForward.w1 |
1 | 0 | 0 |
attr |
FeedForward.w2 |
1 | 0 | 0 |
attr |
FeedForward.w3 |
1 | 0 | 0 |
attr |
PixtralHFTransformer.layers |
1 | 0 | 0 |
attr |
PixtralHFAttention.config |
1 | 0 | 0 |
attr |
PixtralHFAttention.total_num_heads |
1 | 0 | 0 |
attr |
PixtralHFAttention.head_dim |
1 | 0 | 0 |
attr |
PixtralHFAttention.qkv_proj |
1 | 0 | 0 |
attr |
PixtralHFAttention.o_proj |
1 | 0 | 0 |
attr |
PixtralHFAttention.tp_size |
1 | 0 | 0 |
attr |
PixtralHFAttention.n_heads |
1 | 0 | 0 |
attr |
PixtralHFTransformerBlock.attention_norm |
1 | 0 | 0 |
attr |
PixtralHFTransformerBlock.attention |
1 | 0 | 0 |
attr |
PixtralHFTransformerBlock.feed_forward |
1 | 0 | 0 |
attr |
PixtralHFTransformerBlock.ffn_norm |
1 | 0 | 0 |
vllm.model_executor.models.plamo2 (86 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Plamo2DecoderLayer.init |
5 | 4 | 0 |
meth |
Plamo2DecoderLayer.forward |
5 | 3 | 0 |
attr |
Plamo2DecoderLayer.is_mamba |
1 | 0 | 0 |
attr |
Plamo2DecoderLayer.mlp |
1 | 0 | 0 |
attr |
Plamo2DecoderLayer.pre_mixer_norm |
1 | 0 | 0 |
attr |
Plamo2DecoderLayer.post_mixer_norm |
1 | 0 | 0 |
attr |
Plamo2DecoderLayer.pre_mlp_norm |
1 | 0 | 0 |
attr |
Plamo2DecoderLayer.post_mlp_norm |
1 | 0 | 0 |
attr |
Plamo2DecoderLayer.mixer |
1 | 0 | 0 |
attr |
DenseMLP.hidden_size |
1 | 0 | 0 |
attr |
DenseMLP.intermediate_size |
1 | 0 | 0 |
attr |
DenseMLP.gate_up_proj |
1 | 0 | 0 |
attr |
DenseMLP.act |
1 | 0 | 0 |
attr |
DenseMLP.down_proj |
1 | 0 | 0 |
meth |
Plamo2Model.init |
3 | 2 | 0 |
attr |
Plamo2Model.config |
1 | 0 | 0 |
attr |
Plamo2Model.vocab_size |
1 | 0 | 0 |
attr |
Plamo2Model.embed_tokens |
1 | 0 | 0 |
attr |
Plamo2Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Plamo2Model.layers |
1 | 0 | 0 |
attr |
Plamo2Model.norm |
1 | 0 | 0 |
meth |
Plamo2MambaMixer.init |
4 | 3 | 0 |
meth |
Plamo2MambaMixer._project_ssm_parameters |
2 | 0 | 0 |
meth |
Plamo2MambaMixer.forward |
4 | 2 | 0 |
meth |
Plamo2MambaMixer.forward_impl |
4 | 2 | 0 |
attr |
Plamo2MambaMixer.config |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.cache_config |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.model_config |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.quant_config |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.is_lora_enabled |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.hidden_size |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.ssm_state_size |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.conv_kernel_size |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.intermediate_size |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.tp_size |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.head_dim |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.num_heads |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.time_step_rank |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.conv1d |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.in_proj |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.bcdt_proj |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.dt_proj |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.A |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.D |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.dt_bias |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.out_proj |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.activation |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.dt_norm |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.B_norm |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.C_norm |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.chunk_size |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.kv_cache |
1 | 0 | 0 |
attr |
Plamo2MambaMixer.prefix |
1 | 0 | 0 |
meth |
Plamo2AttentionMixer.init |
4 | 3 | 0 |
meth |
Plamo2AttentionMixer.forward |
4 | 3 | 0 |
attr |
Plamo2AttentionMixer.hidden_size |
1 | 0 | 0 |
attr |
Plamo2AttentionMixer.total_num_heads |
1 | 0 | 0 |
attr |
Plamo2AttentionMixer.num_heads |
1 | 0 | 0 |
attr |
Plamo2AttentionMixer.total_num_kv_heads |
1 | 0 | 0 |
attr |
Plamo2AttentionMixer.num_kv_heads |
1 | 0 | 0 |
attr |
Plamo2AttentionMixer.head_dim |
1 | 0 | 0 |
attr |
Plamo2AttentionMixer.q_size |
1 | 0 | 0 |
attr |
Plamo2AttentionMixer.kv_size |
1 | 0 | 0 |
attr |
Plamo2AttentionMixer.scaling |
1 | 0 | 0 |
attr |
Plamo2AttentionMixer.qkv_proj |
1 | 0 | 0 |
attr |
Plamo2AttentionMixer.o_proj |
1 | 0 | 0 |
attr |
Plamo2AttentionMixer.rotary_emb |
1 | 0 | 0 |
attr |
Plamo2AttentionMixer.q_norm |
1 | 0 | 0 |
attr |
Plamo2AttentionMixer.k_norm |
1 | 0 | 0 |
attr |
Plamo2AttentionMixer.attn |
1 | 0 | 0 |
meth |
Plamo2ForCausalLM.forward |
6 | 4 | 0 |
meth |
Plamo2ForCausalLM.load_weights |
2 | 1 | 0 |
attr |
Plamo2ForCausalLM.config |
1 | 0 | 0 |
attr |
Plamo2ForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
Plamo2ForCausalLM.model_config |
1 | 0 | 0 |
attr |
Plamo2ForCausalLM.scheduler_config |
1 | 0 | 0 |
attr |
Plamo2ForCausalLM.model |
1 | 0 | 0 |
attr |
Plamo2ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Plamo2ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Plamo2ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Plamo2ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.plamo3 (46 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
DenseMLP.hidden_size |
1 | 0 | 0 |
attr |
DenseMLP.intermediate_size |
1 | 0 | 0 |
attr |
DenseMLP.gate_up_proj |
1 | 0 | 0 |
attr |
DenseMLP.act |
1 | 0 | 0 |
attr |
DenseMLP.down_proj |
1 | 0 | 0 |
meth |
Plamo3DecoderLayer.init |
4 | 4 | 1 |
meth |
Plamo3DecoderLayer.forward |
5 | 5 | 1 |
attr |
Plamo3DecoderLayer.mixer |
1 | 0 | 0 |
attr |
Plamo3DecoderLayer.mlp |
1 | 0 | 0 |
attr |
Plamo3DecoderLayer.pre_mixer_norm |
1 | 0 | 0 |
attr |
Plamo3DecoderLayer.post_mixer_norm |
1 | 0 | 0 |
attr |
Plamo3DecoderLayer.pre_mlp_norm |
1 | 0 | 0 |
attr |
Plamo3DecoderLayer.post_mlp_norm |
1 | 0 | 0 |
meth |
Plamo3Model.init |
3 | 2 | 0 |
attr |
Plamo3Model.config |
1 | 0 | 0 |
attr |
Plamo3Model.vocab_size |
1 | 0 | 0 |
attr |
Plamo3Model.org_vocab_size |
1 | 0 | 0 |
attr |
Plamo3Model.embed_tokens |
1 | 0 | 0 |
attr |
Plamo3Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Plamo3Model.layers |
1 | 0 | 0 |
attr |
Plamo3Model.norm |
1 | 0 | 0 |
meth |
Plamo3ForCausalLM.load_weights |
2 | 1 | 0 |
attr |
Plamo3ForCausalLM.config |
1 | 0 | 0 |
attr |
Plamo3ForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
Plamo3ForCausalLM.model_config |
1 | 0 | 0 |
attr |
Plamo3ForCausalLM.scheduler_config |
1 | 0 | 0 |
attr |
Plamo3ForCausalLM.model |
1 | 0 | 0 |
attr |
Plamo3ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Plamo3ForCausalLM.unpadded_vocab_size |
1 | 0 | 0 |
attr |
Plamo3ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Plamo3ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Plamo3ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
Plamo3AttentionMixer.init |
4 | 3 | 0 |
meth |
Plamo3AttentionMixer.forward |
5 | 5 | 1 |
attr |
Plamo3AttentionMixer.hidden_size |
1 | 0 | 0 |
attr |
Plamo3AttentionMixer.total_num_heads |
1 | 0 | 0 |
attr |
Plamo3AttentionMixer.num_heads |
1 | 0 | 0 |
attr |
Plamo3AttentionMixer.total_num_kv_heads |
1 | 0 | 0 |
attr |
Plamo3AttentionMixer.num_kv_heads |
1 | 0 | 0 |
attr |
Plamo3AttentionMixer.head_dim |
1 | 0 | 0 |
attr |
Plamo3AttentionMixer.q_size |
1 | 0 | 0 |
attr |
Plamo3AttentionMixer.kv_size |
1 | 0 | 0 |
attr |
Plamo3AttentionMixer.scaling |
1 | 0 | 0 |
attr |
Plamo3AttentionMixer.qkv_proj |
1 | 0 | 0 |
attr |
Plamo3AttentionMixer.o_proj |
1 | 0 | 0 |
attr |
Plamo3AttentionMixer.rotary_emb |
1 | 0 | 0 |
attr |
Plamo3AttentionMixer.q_norm |
1 | 0 | 0 |
attr |
Plamo3AttentionMixer.k_norm |
1 | 0 | 0 |
attr |
Plamo3AttentionMixer.attn |
1 | 0 | 0 |
vllm.model_executor.models.qwen (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QWenLMHeadModel.init |
3 | 2 | 0 |
meth |
QWenBlock.init |
5 | 4 | 0 |
attr |
QWenBlock.ln_1 |
1 | 0 | 0 |
attr |
QWenBlock.attn |
1 | 0 | 0 |
attr |
QWenBlock.ln_2 |
1 | 0 | 0 |
attr |
QWenBlock.mlp |
1 | 0 | 0 |
meth |
QWenModel.init |
3 | 2 | 0 |
attr |
QWenModel.config |
1 | 0 | 0 |
attr |
QWenModel.vocab_size |
1 | 0 | 0 |
attr |
QWenModel.wte |
1 | 0 | 0 |
attr |
QWenModel.ln_f |
1 | 0 | 0 |
attr |
QWenModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
QWenBaseModel.config |
1 | 0 | 0 |
attr |
QWenBaseModel.multimodal_config |
1 | 0 | 0 |
attr |
QWenBaseModel.quant_config |
1 | 0 | 0 |
attr |
QWenBaseModel.transformer |
1 | 0 | 0 |
attr |
QWenBaseModel.lm_head |
1 | 0 | 0 |
attr |
QWenBaseModel.logits_processor |
1 | 0 | 0 |
attr |
QWenBaseModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
QWenAttention.init |
8 | 7 | 0 |
attr |
QWenAttention.hidden_size |
1 | 0 | 0 |
attr |
QWenAttention.total_num_heads |
1 | 0 | 0 |
attr |
QWenAttention.num_heads |
1 | 0 | 0 |
attr |
QWenAttention.head_dim |
1 | 0 | 0 |
attr |
QWenAttention.c_attn |
1 | 0 | 0 |
attr |
QWenAttention.c_proj |
1 | 0 | 0 |
attr |
QWenAttention.scaling |
1 | 0 | 0 |
attr |
QWenAttention.rotary_emb |
1 | 0 | 0 |
attr |
QWenAttention.attn |
1 | 0 | 0 |
meth |
QWenMLP.init |
6 | 5 | 0 |
attr |
QWenMLP.gate_up_proj |
1 | 0 | 0 |
attr |
QWenMLP.c_proj |
1 | 0 | 0 |
attr |
QWenMLP.act_fn |
1 | 0 | 0 |
vllm.model_executor.models.qwen2 (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2MLP.forward |
2 | 0 | 0 |
attr |
Qwen2MLP.gate_up_proj |
1 | 0 | 0 |
attr |
Qwen2MLP.down_proj |
1 | 0 | 0 |
attr |
Qwen2MLP.act_fn |
1 | 0 | 0 |
meth |
Qwen2ForCausalLM.init |
3 | 2 | 0 |
attr |
Qwen2ForCausalLM.config |
1 | 0 | 0 |
attr |
Qwen2ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Qwen2ForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen2ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Qwen2ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen2ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Qwen2DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Qwen2DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Qwen2DecoderLayer.mlp |
1 | 0 | 0 |
attr |
Qwen2DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Qwen2DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
func |
qwen_2_model_invariants |
5 | 4 | 0 |
meth |
Qwen2Model.init |
4 | 3 | 0 |
attr |
Qwen2Model.config |
1 | 0 | 0 |
attr |
Qwen2Model.quant_config |
1 | 0 | 0 |
attr |
Qwen2Model.vocab_size |
1 | 0 | 0 |
attr |
Qwen2Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen2Model.aux_hidden_state_layers |
1 | 0 | 0 |
attr |
Qwen2Model.embed_tokens |
1 | 0 | 0 |
attr |
Qwen2Model.norm |
1 | 0 | 0 |
attr |
Qwen2Attention.hidden_size |
1 | 0 | 0 |
attr |
Qwen2Attention.total_num_heads |
1 | 0 | 0 |
attr |
Qwen2Attention.num_heads |
1 | 0 | 0 |
attr |
Qwen2Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Qwen2Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Qwen2Attention.head_dim |
1 | 0 | 0 |
attr |
Qwen2Attention.q_size |
1 | 0 | 0 |
attr |
Qwen2Attention.kv_size |
1 | 0 | 0 |
attr |
Qwen2Attention.scaling |
1 | 0 | 0 |
attr |
Qwen2Attention.dual_chunk_attention_config |
1 | 0 | 0 |
attr |
Qwen2Attention.qk_norm |
1 | 0 | 0 |
attr |
Qwen2Attention.qkv_proj |
1 | 0 | 0 |
attr |
Qwen2Attention.o_proj |
1 | 0 | 0 |
attr |
Qwen2Attention.rotary_emb |
1 | 0 | 0 |
attr |
Qwen2Attention.attn |
1 | 0 | 0 |
attr |
Qwen2Attention.q_norm |
1 | 0 | 0 |
attr |
Qwen2Attention.k_norm |
1 | 0 | 0 |
vllm.model_executor.models.qwen2_5_omni_thinker (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2_5OmniThinkerProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Qwen2_5OmniThinkerProcessingInfo.get_feature_extractor |
2 | 1 | 0 |
meth |
Qwen2_5OmniThinkerProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
Qwen2_5OmniThinkerMultiModalDataParser.init |
4 | 1 | 0 |
meth |
Qwen2_5OmniThinkerForConditionalGeneration.init |
3 | 2 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.vllm_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.qwen2_5_vl (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
attr |
Qwen2_5_VisionAttention.tp_size |
1 | 0 | 0 |
attr |
Qwen2_5_VisionAttention.tp_rank |
1 | 0 | 0 |
attr |
Qwen2_5_VisionAttention.hidden_size_per_attention_head |
1 | 0 | 0 |
attr |
Qwen2_5_VisionAttention.num_attention_heads_per_partition |
1 | 0 | 0 |
attr |
Qwen2_5_VisionAttention.qkv |
1 | 0 | 0 |
attr |
Qwen2_5_VisionAttention.proj |
1 | 0 | 0 |
attr |
Qwen2_5_VisionAttention.attn |
1 | 0 | 0 |
attr |
Qwen2_5_VisionAttention.apply_rotary_emb |
1 | 0 | 0 |
meth |
Qwen2_5_VLForConditionalGeneration.init |
3 | 2 | 0 |
attr |
Qwen2_5_VLForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Qwen2_5_VLForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
Qwen2_5_VLForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Qwen2_5_VLForConditionalGeneration.vllm_config |
1 | 0 | 0 |
attr |
Qwen2_5_VLForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Qwen2_5_VLForConditionalGeneration.video_pruning_rate |
1 | 0 | 0 |
attr |
Qwen2_5_VLForConditionalGeneration.is_multimodal_pruning_enabled |
1 | 0 | 0 |
attr |
Qwen2_5_VLForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen2_5_VLForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
Qwen2_5_VLForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
Qwen2_5_VisionPatchMerger.hidden_size |
1 | 0 | 0 |
attr |
Qwen2_5_VisionPatchMerger.ln_q |
1 | 0 | 0 |
attr |
Qwen2_5_VisionPatchMerger.mlp |
1 | 0 | 0 |
attr |
Qwen2_5_VisionPatchEmbed.patch_size |
1 | 0 | 0 |
attr |
Qwen2_5_VisionPatchEmbed.temporal_patch_size |
1 | 0 | 0 |
attr |
Qwen2_5_VisionPatchEmbed.hidden_size |
1 | 0 | 0 |
attr |
Qwen2_5_VisionPatchEmbed.proj |
1 | 0 | 0 |
meth |
Qwen2_5_VLProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Qwen2_5_VisionTransformer.rotary_pos_emb_thw |
4 | 0 | 0 |
meth |
Qwen2_5_VisionTransformer.get_window_index_thw |
4 | 0 | 0 |
meth |
Qwen2_5_VisionTransformer.get_rope_by_thw |
4 | 0 | 0 |
attr |
Qwen2_5_VisionTransformer.hidden_size |
1 | 0 | 0 |
attr |
Qwen2_5_VisionTransformer.num_heads |
1 | 0 | 0 |
attr |
Qwen2_5_VisionTransformer.out_hidden_size |
1 | 0 | 0 |
attr |
Qwen2_5_VisionTransformer.window_size |
1 | 0 | 0 |
attr |
Qwen2_5_VisionTransformer.patch_size |
1 | 0 | 0 |
attr |
Qwen2_5_VisionTransformer.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen2_5_VisionTransformer.fullatt_block_indexes |
1 | 0 | 0 |
attr |
Qwen2_5_VisionTransformer.spatial_merge_unit |
1 | 0 | 0 |
attr |
Qwen2_5_VisionTransformer.rotary_pos_emb |
1 | 0 | 0 |
attr |
Qwen2_5_VisionTransformer.attn_backend |
1 | 0 | 0 |
attr |
Qwen2_5_VisionTransformer.patch_embed |
1 | 0 | 0 |
attr |
Qwen2_5_VisionTransformer.blocks |
1 | 0 | 0 |
attr |
Qwen2_5_VisionTransformer.merger |
1 | 0 | 0 |
attr |
Qwen2_5_VisionBlock.norm1 |
1 | 0 | 0 |
attr |
Qwen2_5_VisionBlock.norm2 |
1 | 0 | 0 |
attr |
Qwen2_5_VisionBlock.attn |
1 | 0 | 0 |
attr |
Qwen2_5_VisionBlock.mlp |
1 | 0 | 0 |
meth |
Qwen2_5_VisionMLP.init |
7 | 6 | 0 |
meth |
Qwen2_5_VisionMLP.forward |
2 | 1 | 0 |
attr |
Qwen2_5_VisionMLP.gate_up_proj |
1 | 0 | 0 |
attr |
Qwen2_5_VisionMLP.down_proj |
1 | 0 | 0 |
attr |
Qwen2_5_VisionMLP.act_fn |
1 | 0 | 0 |
vllm.model_executor.models.qwen2_audio (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2AudioProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Qwen2AudioProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
Qwen2AudioMultiModalProjector.init |
3 | 2 | 0 |
meth |
Qwen2AudioMultiModalProjector.forward |
2 | 0 | 0 |
attr |
Qwen2AudioMultiModalProjector.linear |
1 | 0 | 0 |
meth |
Qwen2AudioForConditionalGeneration.init |
3 | 2 | 0 |
attr |
Qwen2AudioForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Qwen2AudioForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Qwen2AudioForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
Qwen2AudioForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen2AudioForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
Qwen2AudioForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
Qwen2AudioForConditionalGeneration.language_model |
1 | 0 | 0 |
vllm.model_executor.models.qwen2_moe (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2MoeModel.init |
3 | 2 | 0 |
attr |
Qwen2MoeModel.vocab_size |
1 | 0 | 0 |
attr |
Qwen2MoeModel.config |
1 | 0 | 0 |
attr |
Qwen2MoeModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen2MoeModel.norm |
1 | 0 | 0 |
attr |
Qwen2MoeModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
Qwen2MoeMLP.forward |
2 | 0 | 0 |
attr |
Qwen2MoeMLP.gate_up_proj |
1 | 0 | 0 |
attr |
Qwen2MoeMLP.down_proj |
1 | 0 | 0 |
attr |
Qwen2MoeMLP.act_fn |
1 | 0 | 0 |
attr |
Qwen2MoeMLP.expert_gate |
1 | 0 | 0 |
attr |
Qwen2MoeDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Qwen2MoeDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Qwen2MoeDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Qwen2MoeDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Qwen2MoeDecoderLayer.mlp |
1 | 0 | 0 |
meth |
Qwen2MoeSparseMoeBlock.init |
4 | 3 | 0 |
attr |
Qwen2MoeSparseMoeBlock.tp_size |
1 | 0 | 0 |
attr |
Qwen2MoeSparseMoeBlock.gate |
1 | 0 | 0 |
attr |
Qwen2MoeSparseMoeBlock.shared_expert_gate |
1 | 0 | 0 |
attr |
Qwen2MoeSparseMoeBlock.experts |
1 | 0 | 0 |
attr |
Qwen2MoeSparseMoeBlock.shared_expert |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
Qwen2MoeForCausalLM.init |
3 | 2 | 0 |
attr |
Qwen2MoeForCausalLM.config |
1 | 0 | 0 |
attr |
Qwen2MoeForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Qwen2MoeForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen2MoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Qwen2MoeForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Qwen2MoeForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.hidden_size |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.total_num_heads |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.num_heads |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.num_kv_heads |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.head_dim |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.q_size |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.kv_size |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.scaling |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.dual_chunk_attention_config |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.qkv_proj |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.o_proj |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.rotary_emb |
1 | 0 | 0 |
attr |
Qwen2MoeAttention.attn |
1 | 0 | 0 |
vllm.model_executor.models.qwen2_rm (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2RewardBaseModel.init |
3 | 2 | 0 |
attr |
Qwen2RewardBaseModel.config |
1 | 0 | 0 |
attr |
Qwen2RewardBaseModel.quant_config |
1 | 0 | 0 |
attr |
Qwen2RewardBaseModel.model |
1 | 0 | 0 |
attr |
Qwen2RewardBaseModel.head_dtype |
1 | 0 | 0 |
attr |
Qwen2RewardBaseModel.score |
1 | 0 | 0 |
attr |
Qwen2RewardBaseModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
Qwen2ForProcessRewardModel.init |
3 | 2 | 0 |
meth |
Qwen2ForRewardModel.init |
3 | 2 | 0 |
vllm.model_executor.models.qwen2_vl (51 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2VLMultiModalDataParser.init |
4 | 1 | 0 |
meth |
Tarsier2Processor.init |
5 | 3 | 0 |
meth |
Qwen2VLForConditionalGeneration.init |
3 | 2 | 0 |
attr |
Qwen2VLForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Qwen2VLForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
Qwen2VLForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Qwen2VLForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Qwen2VLForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen2VLForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
Qwen2VLForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
Qwen2VisionMLP.init |
6 | 5 | 0 |
attr |
Qwen2VisionMLP.fc1 |
1 | 0 | 0 |
attr |
Qwen2VisionMLP.act |
1 | 0 | 0 |
attr |
Qwen2VisionMLP.fc2 |
1 | 0 | 0 |
attr |
Qwen2VisionTransformer.use_data_parallel |
1 | 0 | 0 |
attr |
Qwen2VisionTransformer.out_hidden_size |
1 | 0 | 0 |
attr |
Qwen2VisionTransformer.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen2VisionTransformer.num_heads |
1 | 0 | 0 |
attr |
Qwen2VisionTransformer.embed_dim |
1 | 0 | 0 |
attr |
Qwen2VisionTransformer.patch_embed |
1 | 0 | 0 |
attr |
Qwen2VisionTransformer.rotary_pos_emb |
1 | 0 | 0 |
attr |
Qwen2VisionTransformer.blocks |
1 | 0 | 0 |
attr |
Qwen2VisionTransformer.merger |
1 | 0 | 0 |
attr |
Qwen2VisionTransformer.attn_backend |
1 | 0 | 0 |
attr |
Qwen2VisionAttention.tp_size |
1 | 0 | 0 |
attr |
Qwen2VisionAttention.tp_rank |
1 | 0 | 0 |
attr |
Qwen2VisionAttention.hidden_size_per_attention_head |
1 | 0 | 0 |
attr |
Qwen2VisionAttention.num_attention_heads_per_partition |
1 | 0 | 0 |
attr |
Qwen2VisionAttention.qkv |
1 | 0 | 0 |
attr |
Qwen2VisionAttention.proj |
1 | 0 | 0 |
attr |
Qwen2VisionAttention.attn |
1 | 0 | 0 |
attr |
Qwen2VisionAttention.apply_rotary_emb |
1 | 0 | 0 |
attr |
Qwen2VisionPatchMerger.hidden_size |
1 | 0 | 0 |
attr |
Qwen2VisionPatchMerger.ln_q |
1 | 0 | 0 |
attr |
Qwen2VisionPatchMerger.mlp |
1 | 0 | 0 |
attr |
Qwen2VisionPatchEmbed.patch_size |
1 | 0 | 0 |
attr |
Qwen2VisionPatchEmbed.temporal_patch_size |
1 | 0 | 0 |
attr |
Qwen2VisionPatchEmbed.embed_dim |
1 | 0 | 0 |
attr |
Qwen2VisionPatchEmbed.proj |
1 | 0 | 0 |
meth |
Tarsier2ImageProcessor.init |
3 | 2 | 0 |
attr |
Tarsier2ForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Qwen2VisionBlock.norm1 |
1 | 0 | 0 |
attr |
Qwen2VisionBlock.norm2 |
1 | 0 | 0 |
attr |
Qwen2VisionBlock.attn |
1 | 0 | 0 |
attr |
Qwen2VisionBlock.mlp |
1 | 0 | 0 |
meth |
Qwen2VLProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Qwen2VLProcessingInfo.get_data_parser |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.qwen3 (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3Model.init |
3 | 2 | 0 |
meth |
Qwen3ForCausalLM.init |
3 | 2 | 0 |
attr |
Qwen3ForCausalLM.config |
1 | 0 | 0 |
attr |
Qwen3ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Qwen3ForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen3ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Qwen3ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen3ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Qwen3Attention.hidden_size |
1 | 0 | 0 |
attr |
Qwen3Attention.total_num_heads |
1 | 0 | 0 |
attr |
Qwen3Attention.num_heads |
1 | 0 | 0 |
attr |
Qwen3Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Qwen3Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Qwen3Attention.head_dim |
1 | 0 | 0 |
attr |
Qwen3Attention.q_size |
1 | 0 | 0 |
attr |
Qwen3Attention.kv_size |
1 | 0 | 0 |
attr |
Qwen3Attention.scaling |
1 | 0 | 0 |
attr |
Qwen3Attention.dual_chunk_attention_config |
1 | 0 | 0 |
attr |
Qwen3Attention.qkv_proj |
1 | 0 | 0 |
attr |
Qwen3Attention.o_proj |
1 | 0 | 0 |
attr |
Qwen3Attention.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3Attention.attn |
1 | 0 | 0 |
attr |
Qwen3Attention.q_norm |
1 | 0 | 0 |
attr |
Qwen3Attention.k_norm |
1 | 0 | 0 |
attr |
Qwen3DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Qwen3DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Qwen3DecoderLayer.mlp |
1 | 0 | 0 |
attr |
Qwen3DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Qwen3DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.qwen3_5 (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
Qwen3_5ProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Qwen3_5ForConditionalGeneration.init |
3 | 2 | 0 |
meth |
Qwen3_5ForConditionalGeneration.recompute_mrope_positions |
3 | 0 | 0 |
attr |
Qwen3_5ForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Qwen3_5ForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Qwen3_5ForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
Qwen3_5ForConditionalGeneration.is_multimodal_pruning_enabled |
1 | 0 | 0 |
attr |
Qwen3_5ForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen3_5ForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
Qwen3_5ForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
Qwen3_5Model.init |
3 | 2 | 0 |
attr |
Qwen3_5Model.num_redundant_experts |
1 | 0 | 0 |
attr |
Qwen3_5Model.config |
1 | 0 | 0 |
attr |
Qwen3_5Model.vocab_size |
1 | 0 | 0 |
attr |
Qwen3_5Model.embed_tokens |
1 | 0 | 0 |
attr |
Qwen3_5Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen3_5Model.norm |
1 | 0 | 0 |
meth |
Qwen3_5MoeForCausalLM.init |
3 | 2 | 0 |
meth |
Qwen3_5GatedDeltaNet.fix_query_key_value_ordering |
3 | 2 | 0 |
meth |
Qwen3_5GatedDeltaNet.forward |
3 | 2 | 0 |
meth |
Qwen3_5_MoeMixtureOfExperts.set_moe_parameters |
1 | 0 | 0 |
meth |
Qwen3_5ForCausalLMBase.init |
3 | 2 | 0 |
meth |
Qwen3_5ForCausalLMBase.forward |
6 | 5 | 0 |
attr |
Qwen3_5ForCausalLMBase.vllm_config |
1 | 0 | 0 |
attr |
Qwen3_5ForCausalLMBase.model_config |
1 | 0 | 0 |
attr |
Qwen3_5ForCausalLMBase.quant_config |
1 | 0 | 0 |
attr |
Qwen3_5ForCausalLMBase.config |
1 | 0 | 0 |
attr |
Qwen3_5ForCausalLMBase.scheduler_config |
1 | 0 | 0 |
attr |
Qwen3_5ForCausalLMBase.model |
1 | 0 | 0 |
attr |
Qwen3_5ForCausalLMBase.logits_processor |
1 | 0 | 0 |
attr |
Qwen3_5ForCausalLMBase.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen3_5ForCausalLMBase.lm_head |
1 | 0 | 0 |
attr |
Qwen3_5DecoderLayer.layer_type |
1 | 0 | 0 |
attr |
Qwen3_5DecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
Qwen3_5DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Qwen3_5DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Qwen3_5DecoderLayer.layer_scale |
1 | 0 | 0 |
attr |
Qwen3_5DecoderLayer.linear_attn |
1 | 0 | 0 |
attr |
Qwen3_5DecoderLayer.mlp |
1 | 0 | 0 |
attr |
Qwen3_5DecoderLayer.attn_layer_scale |
1 | 0 | 0 |
attr |
Qwen3_5DecoderLayer.ffn_layer_scale |
1 | 0 | 0 |
attr |
Qwen3_5DecoderLayer.self_attn |
1 | 0 | 0 |
meth |
Qwen3_5MoeProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Qwen3_5MoeForConditionalGeneration.init |
3 | 2 | 0 |
attr |
Qwen3_5MoeForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Qwen3_5MoeForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Qwen3_5MoeForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
Qwen3_5MoeForConditionalGeneration.is_multimodal_pruning_enabled |
1 | 0 | 0 |
attr |
Qwen3_5MoeForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen3_5MoeForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
Qwen3_5MoeForConditionalGeneration.language_model |
1 | 0 | 0 |
vllm.model_executor.models.qwen3_5_mtp (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3_5MoeMTP.init |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
Qwen3_5MultiTokenPredictor.init |
3 | 2 | 0 |
attr |
Qwen3_5MultiTokenPredictor.config |
1 | 0 | 0 |
attr |
Qwen3_5MultiTokenPredictor.vocab_size |
1 | 0 | 0 |
attr |
Qwen3_5MultiTokenPredictor.mtp_start_layer_idx |
1 | 0 | 0 |
attr |
Qwen3_5MultiTokenPredictor.num_mtp_layers |
1 | 0 | 0 |
attr |
Qwen3_5MultiTokenPredictor.embed_tokens |
1 | 0 | 0 |
attr |
Qwen3_5MultiTokenPredictor.fc |
1 | 0 | 0 |
attr |
Qwen3_5MultiTokenPredictor.layers |
1 | 0 | 0 |
attr |
Qwen3_5MultiTokenPredictor.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen3_5MultiTokenPredictor.norm |
1 | 0 | 0 |
attr |
Qwen3_5MultiTokenPredictor.pre_fc_norm_hidden |
1 | 0 | 0 |
attr |
Qwen3_5MultiTokenPredictor.pre_fc_norm_embedding |
1 | 0 | 0 |
meth |
Qwen3_5MTP.init |
3 | 2 | 0 |
meth |
Qwen3_5MTP.forward |
7 | 6 | 0 |
attr |
Qwen3_5MTP.vllm_config |
1 | 0 | 0 |
attr |
Qwen3_5MTP.quant_config |
1 | 0 | 0 |
attr |
Qwen3_5MTP.config |
1 | 0 | 0 |
attr |
Qwen3_5MTP.model |
1 | 0 | 0 |
attr |
Qwen3_5MTP.logits_processor |
1 | 0 | 0 |
attr |
Qwen3_5MTP.lm_head |
1 | 0 | 0 |
vllm.model_executor.models.qwen3_asr (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3ASRProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Qwen3ASRForConditionalGeneration.init |
3 | 2 | 0 |
attr |
Qwen3ASRForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Qwen3ASRForConditionalGeneration.vllm_config |
1 | 0 | 0 |
attr |
Qwen3ASRForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Qwen3ASRForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Qwen3ASRForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
Qwen3ASRForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen3ASRForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
Qwen3ASRForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.qwen3_asr_realtime (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3ASRRealtimeGeneration.init |
3 | 2 | 0 |
meth |
Qwen3ASRRealtimeBuffer.init |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.qwen3_moe (72 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Qwen3MoeAttention.hidden_size |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.total_num_heads |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.num_heads |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.num_kv_heads |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.head_dim |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.q_size |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.kv_size |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.scaling |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.dual_chunk_attention_config |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.qkv_proj |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.o_proj |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.attn |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.q_norm |
1 | 0 | 0 |
attr |
Qwen3MoeAttention.k_norm |
1 | 0 | 0 |
attr |
Qwen3MoeDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Qwen3MoeDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Qwen3MoeDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Qwen3MoeDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Qwen3MoeDecoderLayer.mlp |
1 | 0 | 0 |
meth |
Qwen3MoeSparseMoeBlock.init |
3 | 2 | 0 |
attr |
Qwen3MoeSparseMoeBlock.tp_size |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.ep_group |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.ep_rank |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.ep_size |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.n_routed_experts |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.is_sequence_parallel |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.enable_eplb |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.n_logical_experts |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.n_redundant_experts |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.n_physical_experts |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.n_local_physical_experts |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.physical_expert_start |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.physical_expert_end |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.gate |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.experts |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.shared_expert_gate |
1 | 0 | 0 |
attr |
Qwen3MoeSparseMoeBlock.shared_expert |
1 | 0 | 0 |
meth |
Qwen3MoeMLP.forward |
2 | 0 | 0 |
attr |
Qwen3MoeMLP.gate_up_proj |
1 | 0 | 0 |
attr |
Qwen3MoeMLP.down_proj |
1 | 0 | 0 |
attr |
Qwen3MoeMLP.act_fn |
1 | 0 | 0 |
attr |
Qwen3MoeMLP.expert_gate |
1 | 0 | 0 |
meth |
Qwen3MoeForCausalLM.init |
3 | 2 | 0 |
attr |
Qwen3MoeForCausalLM.config |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.expert_weights |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.moe_layers |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.num_moe_layers |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.num_expert_groups |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.num_shared_experts |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.num_logical_experts |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.num_physical_experts |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.num_local_physical_experts |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.num_routed_experts |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.num_redundant_experts |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
Qwen3MoeModel.init |
4 | 3 | 0 |
attr |
Qwen3MoeModel.num_redundant_experts |
1 | 0 | 0 |
attr |
Qwen3MoeModel.vocab_size |
1 | 0 | 0 |
attr |
Qwen3MoeModel.config |
1 | 0 | 0 |
attr |
Qwen3MoeModel.quant_config |
1 | 0 | 0 |
attr |
Qwen3MoeModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen3MoeModel.norm |
1 | 0 | 0 |
attr |
Qwen3MoeModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.qwen3_next (114 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
fused_gdn_gating_kernel |
12 | 4 | 0 |
meth |
ChunkGatedDeltaRule.forward_cuda |
10 | 9 | 0 |
meth |
ChunkGatedDeltaRule.forward_native |
10 | 9 | 0 |
meth |
Qwen3NextSparseMoeBlock.init |
3 | 2 | 0 |
attr |
Qwen3NextSparseMoeBlock.tp_size |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.ep_group |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.ep_rank |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.ep_size |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.n_routed_experts |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.is_sequence_parallel |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.enable_eplb |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.n_logical_experts |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.n_redundant_experts |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.n_physical_experts |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.n_local_physical_experts |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.physical_expert_start |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.physical_expert_end |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.gate |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.shared_expert_gate |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.experts |
1 | 0 | 0 |
attr |
Qwen3NextSparseMoeBlock.shared_expert |
1 | 0 | 0 |
meth |
Qwen3NextModel.init |
3 | 2 | 0 |
attr |
Qwen3NextModel.num_redundant_experts |
1 | 0 | 0 |
attr |
Qwen3NextModel.config |
1 | 0 | 0 |
attr |
Qwen3NextModel.vocab_size |
1 | 0 | 0 |
attr |
Qwen3NextModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen3NextModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen3NextModel.norm |
1 | 0 | 0 |
meth |
QwenNextMixtureOfExperts.set_moe_parameters |
1 | 0 | 0 |
meth |
Qwen3NextGatedDeltaNet.fix_query_key_value_ordering |
3 | 2 | 0 |
meth |
Qwen3NextGatedDeltaNet.rearrange_mixed_qkv |
2 | 0 | 0 |
meth |
Qwen3NextGatedDeltaNet.forward |
3 | 2 | 0 |
meth |
Qwen3NextGatedDeltaNet._forward_core |
5 | 4 | 0 |
attr |
Qwen3NextGatedDeltaNet.tp_size |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.tp_rank |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.hidden_size |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.num_v_heads |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.num_k_heads |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.head_k_dim |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.head_v_dim |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.key_dim |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.value_dim |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.conv_kernel_size |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.layer_idx |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.activation |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.act |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.layer_norm_epsilon |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.prefix |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.config |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.model_config |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.cache_config |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.quant_config |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.speculative_config |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.num_spec |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.conv_dim |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.conv1d |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.in_proj_qkvz |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.in_proj_ba |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.dt_bias |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.A_log |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.norm |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.out_proj |
1 | 0 | 0 |
attr |
Qwen3NextGatedDeltaNet.chunk_gated_delta_rule |
1 | 0 | 0 |
meth |
Qwen3NextDecoderLayer.forward |
5 | 4 | 0 |
attr |
Qwen3NextDecoderLayer.layer_type |
1 | 0 | 0 |
attr |
Qwen3NextDecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
Qwen3NextDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Qwen3NextDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Qwen3NextDecoderLayer.layer_scale |
1 | 0 | 0 |
attr |
Qwen3NextDecoderLayer.linear_attn |
1 | 0 | 0 |
attr |
Qwen3NextDecoderLayer.mlp |
1 | 0 | 0 |
attr |
Qwen3NextDecoderLayer.attn_layer_scale |
1 | 0 | 0 |
attr |
Qwen3NextDecoderLayer.ffn_layer_scale |
1 | 0 | 0 |
attr |
Qwen3NextDecoderLayer.self_attn |
1 | 0 | 0 |
meth |
Qwen3NextForCausalLM.init |
3 | 2 | 0 |
meth |
Qwen3NextForCausalLM.forward |
6 | 5 | 0 |
attr |
Qwen3NextForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
Qwen3NextForCausalLM.model_config |
1 | 0 | 0 |
attr |
Qwen3NextForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Qwen3NextForCausalLM.config |
1 | 0 | 0 |
attr |
Qwen3NextForCausalLM.scheduler_config |
1 | 0 | 0 |
attr |
Qwen3NextForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen3NextForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Qwen3NextForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Qwen3NextForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
func |
fi_chunk_gated_delta_rule |
10 | 9 | 0 |
meth |
Qwen3NextAttention.forward |
4 | 3 | 0 |
attr |
Qwen3NextAttention.config |
1 | 0 | 0 |
attr |
Qwen3NextAttention.hidden_size |
1 | 0 | 0 |
attr |
Qwen3NextAttention.total_num_heads |
1 | 0 | 0 |
attr |
Qwen3NextAttention.num_heads |
1 | 0 | 0 |
attr |
Qwen3NextAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Qwen3NextAttention.num_kv_heads |
1 | 0 | 0 |
attr |
Qwen3NextAttention.head_dim |
1 | 0 | 0 |
attr |
Qwen3NextAttention.q_size |
1 | 0 | 0 |
attr |
Qwen3NextAttention.kv_size |
1 | 0 | 0 |
attr |
Qwen3NextAttention.scaling |
1 | 0 | 0 |
attr |
Qwen3NextAttention.dual_chunk_attention_config |
1 | 0 | 0 |
attr |
Qwen3NextAttention.attn_output_gate |
1 | 0 | 0 |
attr |
Qwen3NextAttention.qkv_proj |
1 | 0 | 0 |
attr |
Qwen3NextAttention.o_proj |
1 | 0 | 0 |
attr |
Qwen3NextAttention.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3NextAttention.attn |
1 | 0 | 0 |
attr |
Qwen3NextAttention.q_norm |
1 | 0 | 0 |
attr |
Qwen3NextAttention.k_norm |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.qwen3_next_mtp (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
Qwen3NextMultiTokenPredictor.init |
3 | 2 | 0 |
attr |
Qwen3NextMultiTokenPredictor.config |
1 | 0 | 0 |
attr |
Qwen3NextMultiTokenPredictor.vocab_size |
1 | 0 | 0 |
attr |
Qwen3NextMultiTokenPredictor.mtp_start_layer_idx |
1 | 0 | 0 |
attr |
Qwen3NextMultiTokenPredictor.num_mtp_layers |
1 | 0 | 0 |
attr |
Qwen3NextMultiTokenPredictor.embed_tokens |
1 | 0 | 0 |
attr |
Qwen3NextMultiTokenPredictor.fc |
1 | 0 | 0 |
attr |
Qwen3NextMultiTokenPredictor.layers |
1 | 0 | 0 |
attr |
Qwen3NextMultiTokenPredictor.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen3NextMultiTokenPredictor.norm |
1 | 0 | 0 |
attr |
Qwen3NextMultiTokenPredictor.pre_fc_norm_hidden |
1 | 0 | 0 |
attr |
Qwen3NextMultiTokenPredictor.pre_fc_norm_embedding |
1 | 0 | 0 |
meth |
Qwen3NextMTP.init |
3 | 2 | 0 |
meth |
Qwen3NextMTP.forward |
7 | 6 | 0 |
attr |
Qwen3NextMTP.vllm_config |
1 | 0 | 0 |
attr |
Qwen3NextMTP.quant_config |
1 | 0 | 0 |
attr |
Qwen3NextMTP.config |
1 | 0 | 0 |
attr |
Qwen3NextMTP.model |
1 | 0 | 0 |
attr |
Qwen3NextMTP.lm_head |
1 | 0 | 0 |
attr |
Qwen3NextMTP.logits_processor |
1 | 0 | 0 |
vllm.model_executor.models.qwen3_omni_moe_thinker (103 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3OmniMoeThinkerProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Qwen3OmniMoeThinkerProcessingInfo.get_feature_extractor |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
Qwen3MoeLLMModel.init |
3 | 2 | 0 |
attr |
Qwen3MoeLLMModel.deepstack_multiscale_layer_start |
1 | 0 | 0 |
meth |
SinusoidsPositionEmbedding.init |
4 | 3 | 0 |
attr |
SinusoidsPositionEmbedding.length |
1 | 0 | 0 |
attr |
SinusoidsPositionEmbedding.channels |
1 | 0 | 0 |
attr |
SinusoidsPositionEmbedding.max_timescale |
1 | 0 | 0 |
meth |
Qwen3_VisionMLP.init |
7 | 6 | 0 |
meth |
Qwen3_VisionMLP.forward |
2 | 1 | 0 |
attr |
Qwen3_VisionMLP.linear_fc1 |
1 | 0 | 0 |
attr |
Qwen3_VisionMLP.linear_fc2 |
1 | 0 | 0 |
attr |
Qwen3_VisionMLP.act_fn |
1 | 0 | 0 |
meth |
Qwen3OmniMoeAudioEncoder.init |
3 | 2 | 0 |
meth |
Qwen3OmniMoeAudioEncoder.forward |
4 | 3 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.num_mel_bins |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.max_source_positions |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.n_window |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.n_window_infer |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.conv_chunksize |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.positional_embedding |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.conv2d1 |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.conv2d2 |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.conv2d3 |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.conv_out |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.layers |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.ln_post |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.proj1 |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.act |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.proj2 |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoder.attn_backend |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchEmbed.patch_size |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchEmbed.temporal_patch_size |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchEmbed.hidden_size |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchEmbed.proj |
1 | 0 | 0 |
meth |
Qwen3OmniMoeAudioEncoderLayer.init |
3 | 2 | 0 |
attr |
Qwen3OmniMoeAudioEncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoderLayer.self_attn_layer_norm |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoderLayer.activation_fn |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoderLayer.fc1 |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoderLayer.fc2 |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioEncoderLayer.final_layer_norm |
1 | 0 | 0 |
meth |
Qwen3MoeLLMForCausalLM.init |
3 | 2 | 0 |
attr |
Qwen3MoeLLMForCausalLM.config |
1 | 0 | 0 |
attr |
Qwen3MoeLLMForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Qwen3MoeLLMForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen3MoeLLMForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Qwen3MoeLLMForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Qwen3MoeLLMForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
Qwen3OmniMoeAudioAttention.init |
3 | 2 | 0 |
attr |
Qwen3OmniMoeAudioAttention.embed_dim |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioAttention.num_heads |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioAttention.head_dim |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioAttention.num_local_heads |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioAttention.scaling |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioAttention.qkv |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioAttention.out_proj |
1 | 0 | 0 |
attr |
Qwen3OmniMoeAudioAttention.attn |
1 | 0 | 0 |
meth |
Qwen3OmniMoeThinkerForConditionalGeneration.init |
3 | 2 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.vllm_config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.quant_config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.use_deepstack |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.deepstack_num_level |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.visual_dim |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.multiscale_dim |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.deepstack_input_embeds |
1 | 0 | 0 |
attr |
Qwen3_VisionBlock.norm1 |
1 | 0 | 0 |
attr |
Qwen3_VisionBlock.norm2 |
1 | 0 | 0 |
attr |
Qwen3_VisionBlock.attn |
1 | 0 | 0 |
attr |
Qwen3_VisionBlock.mlp |
1 | 0 | 0 |
meth |
Qwen3Omni_VisionTransformer.init |
5 | 4 | 0 |
meth |
Qwen3Omni_VisionTransformer.rot_pos_emb |
2 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.hidden_size |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.num_heads |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.image_size |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.patch_size |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.spatial_merge_unit |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.temporal_patch_size |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.num_grid_per_side |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.apply_vit_abs_pos_embed |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.deepstack_visual_indexes |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.patch_embed |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.rotary_pos_emb |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.blocks |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.merger |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.attn_backend |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.pos_embed |
1 | 0 | 0 |
attr |
Qwen3Omni_VisionTransformer.merger_list |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchMerger.hidden_size |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchMerger.use_postshuffle_norm |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchMerger.ln_q |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchMerger.mlp |
1 | 0 | 0 |
vllm.model_executor.models.qwen3_vl (74 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3_VisionTransformer.rot_pos_emb |
2 | 1 | 0 |
attr |
Qwen3_VisionTransformer.hidden_size |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.num_heads |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.num_position_embeddings |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.patch_size |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.spatial_merge_unit |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.temporal_patch_size |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.deepstack_visual_indexes |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.num_grid_per_side |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.tp_size |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.out_hidden_size |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.patch_embed |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.pos_embed |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.rotary_pos_emb |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.merger |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.deepstack_merger_list |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.attn_backend |
1 | 0 | 0 |
attr |
Qwen3_VisionTransformer.blocks |
1 | 0 | 0 |
meth |
Qwen3_VisionMLP.init |
7 | 6 | 0 |
meth |
Qwen3_VisionMLP.forward |
2 | 1 | 0 |
attr |
Qwen3_VisionMLP.linear_fc1 |
1 | 0 | 0 |
attr |
Qwen3_VisionMLP.linear_fc2 |
1 | 0 | 0 |
attr |
Qwen3_VisionMLP.act_fn |
1 | 0 | 0 |
meth |
Qwen3VLProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Qwen3VLProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
Qwen3VLProcessingInfo._calculate_timestamps |
4 | 3 | 0 |
attr |
Qwen3_VisionPatchEmbed.patch_size |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchEmbed.temporal_patch_size |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchEmbed.hidden_size |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchEmbed.proj |
1 | 0 | 0 |
meth |
Qwen3LLMForCausalLM.init |
3 | 2 | 0 |
attr |
Qwen3LLMForCausalLM.config |
1 | 0 | 0 |
attr |
Qwen3LLMForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Qwen3LLMForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen3LLMForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Qwen3LLMForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen3LLMForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Qwen3VLForConditionalGeneration.init |
3 | 2 | 0 |
meth |
Qwen3VLForConditionalGeneration._get_expanded_positions |
9 | 0 | 0 |
meth |
Qwen3VLForConditionalGeneration._get_mrope_input_positions |
4 | 3 | 0 |
attr |
Qwen3VLForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Qwen3VLForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Qwen3VLForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Qwen3VLForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
Qwen3VLForConditionalGeneration.video_pruning_rate |
1 | 0 | 0 |
attr |
Qwen3VLForConditionalGeneration.is_multimodal_pruning_enabled |
1 | 0 | 0 |
attr |
Qwen3VLForConditionalGeneration.use_deepstack |
1 | 0 | 0 |
attr |
Qwen3VLForConditionalGeneration.deepstack_num_level |
1 | 0 | 0 |
attr |
Qwen3VLForConditionalGeneration.visual_dim |
1 | 0 | 0 |
attr |
Qwen3VLForConditionalGeneration.multiscale_dim |
1 | 0 | 0 |
attr |
Qwen3VLForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen3VLForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
Qwen3VLForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
Qwen3VLForConditionalGeneration.deepstack_input_embeds |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchMerger.hidden_size |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchMerger.use_postshuffle_norm |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchMerger.norm |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchMerger.linear_fc1 |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchMerger.act_fn |
1 | 0 | 0 |
attr |
Qwen3_VisionPatchMerger.linear_fc2 |
1 | 0 | 0 |
attr |
Qwen3_VisionBlock.norm1 |
1 | 0 | 0 |
attr |
Qwen3_VisionBlock.norm2 |
1 | 0 | 0 |
attr |
Qwen3_VisionBlock.attn |
1 | 0 | 0 |
attr |
Qwen3_VisionBlock.mlp |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.qwen3_vl_moe (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3VLMoeProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
Qwen3VLMoeForConditionalGeneration.init |
3 | 2 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.packed_modules_mapping |
1 | 0 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.video_pruning_rate |
1 | 0 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.is_multimodal_pruning_enabled |
1 | 0 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.use_deepstack |
1 | 0 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.deepstack_num_level |
1 | 0 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.visual_dim |
1 | 0 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.multiscale_dim |
1 | 0 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.deepstack_input_embeds |
1 | 0 | 0 |
meth |
Qwen3VLMoeMixtureOfExperts.set_moe_parameters |
1 | 0 | 0 |
meth |
Qwen3MoeLLMForCausalLM.__init__ |
3 | 2 | 0 |
attr |
Qwen3MoeLLMForCausalLM.config |
1 | 0 | 0 |
attr |
Qwen3MoeLLMForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Qwen3MoeLLMForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen3MoeLLMForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Qwen3MoeLLMForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Qwen3MoeLLMForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.qwen_vl (47 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TransformerBlock.__init__ |
8 | 7 | 0 |
attr |
TransformerBlock.width |
1 | 0 | 0 |
attr |
TransformerBlock.layers |
1 | 0 | 0 |
attr |
TransformerBlock.resblocks |
1 | 0 | 0 |
meth |
QwenVLMLP.__init__ |
5 | 4 | 0 |
meth |
QwenVLMLP.forward |
2 | 0 | 0 |
attr |
QwenVLMLP.c_fc |
1 | 0 | 0 |
attr |
QwenVLMLP.act_fn |
1 | 0 | 0 |
attr |
QwenVLMLP.c_proj |
1 | 0 | 0 |
meth |
VisionTransformer.__init__ |
13 | 11 | 0 |
attr |
VisionTransformer.image_size |
1 | 0 | 0 |
attr |
VisionTransformer.patch_size |
1 | 0 | 0 |
attr |
VisionTransformer.grid_size |
1 | 0 | 0 |
attr |
VisionTransformer.output_dim |
1 | 0 | 0 |
attr |
VisionTransformer.conv1 |
1 | 0 | 0 |
attr |
VisionTransformer.positional_embedding |
1 | 0 | 0 |
attr |
VisionTransformer.ln_pre |
1 | 0 | 0 |
attr |
VisionTransformer.transformer |
1 | 0 | 0 |
attr |
VisionTransformer.attn_pool |
1 | 0 | 0 |
attr |
VisionTransformer.ln_post |
1 | 0 | 0 |
attr |
VisionTransformer.proj |
1 | 0 | 0 |
attr |
VisionTransformer.image_start_id |
1 | 0 | 0 |
attr |
VisionTransformer.image_end_id |
1 | 0 | 0 |
attr |
VisionTransformer.image_pad_id |
1 | 0 | 0 |
meth |
VisualAttentionBlock.__init__ |
7 | 6 | 0 |
attr |
VisualAttentionBlock.ln_1 |
1 | 0 | 0 |
attr |
VisualAttentionBlock.ln_2 |
1 | 0 | 0 |
attr |
VisualAttentionBlock.attn |
1 | 0 | 0 |
attr |
VisualAttentionBlock.mlp |
1 | 0 | 0 |
attr |
QwenVLProcessor.config |
1 | 0 | 0 |
attr |
QwenVLProcessor.tokenizer |
1 | 0 | 0 |
attr |
QwenVLProcessor.image_transform |
1 | 0 | 0 |
meth |
VisualAttention.__init__ |
7 | 6 | 0 |
attr |
VisualAttention.embed_dim |
1 | 0 | 0 |
attr |
VisualAttention.kdim |
1 | 0 | 0 |
attr |
VisualAttention.vdim |
1 | 0 | 0 |
attr |
VisualAttention.num_heads |
1 | 0 | 0 |
attr |
VisualAttention.hidden_size_per_attention_head |
1 | 0 | 0 |
attr |
VisualAttention.num_attention_heads_per_partition |
1 | 0 | 0 |
attr |
VisualAttention.hidden_size_per_partition |
1 | 0 | 0 |
attr |
VisualAttention.in_proj |
1 | 0 | 0 |
attr |
VisualAttention.out_proj |
1 | 0 | 0 |
attr |
VisualAttention.norm_factor |
1 | 0 | 0 |
meth |
QwenVLModel.__init__ |
3 | 2 | 0 |
attr |
QwenVLModel.visual |
1 | 0 | 0 |
vllm.model_executor.models.radio (60 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ViTPatchLinear.__init__ |
5 | 3 | 0 |
attr |
ViTPatchLinear.patch_size |
1 | 0 | 0 |
attr |
to_4tuple |
1 | 0 | 0 |
meth |
RadioVisionEncoder.__init__ |
3 | 1 | 0 |
meth |
RadioVisionEncoder.forward |
3 | 2 | 0 |
meth |
ClsToken.__init__ |
6 | 5 | 0 |
meth |
ClsToken.forward |
2 | 1 | 0 |
attr |
ClsToken.ndim |
1 | 0 | 0 |
attr |
ClsToken.enabled |
1 | 0 | 0 |
attr |
ClsToken.num_registers |
1 | 0 | 0 |
attr |
ClsToken.num_tokens |
1 | 0 | 0 |
attr |
ClsToken.num_patches |
1 | 0 | 0 |
attr |
ClsToken.token |
1 | 0 | 0 |
attr |
to_2tuple |
1 | 0 | 0 |
meth |
RadioInternVisionModel._init_img_size |
3 | 1 | 0 |
meth |
RadioInternVisionModel.get_input_embeddings |
1 | 0 | 0 |
attr |
RadioInternVisionModel.config |
1 | 0 | 0 |
attr |
RadioInternVisionModel.patch_generator |
1 | 0 | 0 |
attr |
RadioInternVisionModel.encoder |
1 | 0 | 0 |
attr |
to_3tuple |
1 | 0 | 0 |
meth |
Im2Patches.__init__ |
2 | 1 | 0 |
attr |
Im2Patches.patch_size |
1 | 0 | 0 |
meth |
RadioVisionEncoderLayer.__init__ |
3 | 1 | 0 |
meth |
RadioVisionEncoderLayer.forward |
3 | 2 | 0 |
attr |
to_1tuple |
1 | 0 | 0 |
meth |
ViTPatchGenerator.__init__ |
16 | 13 | 0 |
meth |
ViTPatchGenerator._load_embed |
3 | 2 | 0 |
meth |
ViTPatchGenerator._load_projection |
3 | 2 | 0 |
meth |
ViTPatchGenerator._get_pos_embeddings |
3 | 2 | 0 |
prop |
ViTPatchGenerator.apply_cls_token |
1 | 0 | 0 |
prop |
ViTPatchGenerator.num_cls_tokens |
1 | 0 | 0 |
prop |
ViTPatchGenerator.num_cls_patches |
1 | 0 | 0 |
prop |
ViTPatchGenerator.num_registers |
1 | 0 | 0 |
prop |
ViTPatchGenerator.num_skip |
1 | 0 | 0 |
attr |
ViTPatchGenerator.cpe_mode |
1 | 0 | 0 |
attr |
ViTPatchGenerator.pos_dropout |
1 | 0 | 0 |
attr |
ViTPatchGenerator.return_pos_enc |
1 | 0 | 0 |
attr |
ViTPatchGenerator.patch_size |
1 | 0 | 0 |
attr |
ViTPatchGenerator.abs_pos |
1 | 0 | 0 |
attr |
ViTPatchGenerator.embed_dim |
1 | 0 | 0 |
attr |
ViTPatchGenerator.num_rows |
1 | 0 | 0 |
attr |
ViTPatchGenerator.num_cols |
1 | 0 | 0 |
attr |
ViTPatchGenerator.input_dims |
1 | 0 | 0 |
attr |
ViTPatchGenerator.num_patches |
1 | 0 | 0 |
attr |
ViTPatchGenerator.max_input_dims |
1 | 0 | 0 |
attr |
ViTPatchGenerator.im_to_patches |
1 | 0 | 0 |
attr |
ViTPatchGenerator.embedder |
1 | 0 | 0 |
attr |
ViTPatchGenerator.cls_token |
1 | 0 | 0 |
attr |
ViTPatchGenerator.patch_normalizer |
1 | 0 | 0 |
attr |
ViTPatchGenerator.pos_embed |
1 | 0 | 0 |
meth |
RadioModel.load_weights |
2 | 1 | 0 |
attr |
RadioModel.config |
1 | 0 | 0 |
attr |
RadioModel.model |
1 | 0 | 0 |
attr |
RadioModel.summary_idxs |
1 | 0 | 0 |
vllm.model_executor.models.registry (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
ModelRegistry |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.roberta (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RobertaClassificationHead.__init__ |
2 | 1 | 0 |
attr |
RobertaClassificationHead.dense |
1 | 0 | 0 |
attr |
RobertaClassificationHead.out_proj |
1 | 0 | 0 |
meth |
BgeM3EmbeddingModel.__init__ |
3 | 2 | 0 |
meth |
BgeM3EmbeddingModel.load_weights |
2 | 1 | 0 |
attr |
BgeM3EmbeddingModel.hidden_size |
1 | 0 | 0 |
attr |
BgeM3EmbeddingModel.head_dtype |
1 | 0 | 0 |
attr |
BgeM3EmbeddingModel.bos_token_id |
1 | 0 | 0 |
attr |
BgeM3EmbeddingModel.eos_token_id |
1 | 0 | 0 |
attr |
BgeM3EmbeddingModel.secondary_weight_prefixes |
1 | 0 | 0 |
attr |
BgeM3EmbeddingModel.secondary_weight_files |
1 | 0 | 0 |
attr |
BgeM3EmbeddingModel.secondary_weights |
1 | 0 | 0 |
meth |
RobertaEmbeddingModel.__init__ |
3 | 2 | 0 |
meth |
RobertaEmbeddingModel.load_weights |
2 | 1 | 0 |
meth |
RobertaForSequenceClassification.__init__ |
3 | 2 | 0 |
meth |
RobertaForSequenceClassification.load_weights |
2 | 1 | 0 |
attr |
RobertaForSequenceClassification.jina_to_vllm_mapper |
1 | 0 | 0 |
attr |
RobertaForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
RobertaForSequenceClassification.roberta |
1 | 0 | 0 |
attr |
RobertaForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
RobertaForSequenceClassification.pooler |
1 | 0 | 0 |
meth |
RobertaEmbedding.__init__ |
2 | 1 | 0 |
attr |
RobertaEmbedding.size |
1 | 0 | 0 |
attr |
RobertaEmbedding.word_embeddings |
1 | 0 | 0 |
attr |
RobertaEmbedding.padding_idx |
1 | 0 | 0 |
attr |
RobertaEmbedding.position_embeddings |
1 | 0 | 0 |
attr |
RobertaEmbedding.token_type_embeddings |
1 | 0 | 0 |
attr |
RobertaEmbedding.LayerNorm |
1 | 0 | 0 |
vllm.model_executor.models.rvl (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
RForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
RForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
meth |
RVLMultiModalProjector.__init__ |
2 | 0 | 0 |
attr |
RVLMultiModalProjector.pre_norm |
1 | 0 | 0 |
attr |
RVLMultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
RVLMultiModalProjector.act |
1 | 0 | 0 |
attr |
RVLMultiModalProjector.linear_2 |
1 | 0 | 0 |
meth |
RVLProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
RVLProcessingInfo.get_hf_processor |
2 | 1 | 0 |
vllm.model_executor.models.seed_oss (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
SeedOssDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
SeedOssDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
SeedOssDecoderLayer.mlp |
1 | 0 | 0 |
attr |
SeedOssDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
SeedOssDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
SeedOssMLP.forward |
2 | 0 | 0 |
attr |
SeedOssMLP.gate_up_proj |
1 | 0 | 0 |
attr |
SeedOssMLP.down_proj |
1 | 0 | 0 |
attr |
SeedOssMLP.act_fn |
1 | 0 | 0 |
attr |
SeedOssAttention.hidden_size |
1 | 0 | 0 |
attr |
SeedOssAttention.total_num_heads |
1 | 0 | 0 |
attr |
SeedOssAttention.num_heads |
1 | 0 | 0 |
attr |
SeedOssAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
SeedOssAttention.head_dim |
1 | 0 | 0 |
attr |
SeedOssAttention.num_kv_heads |
1 | 0 | 0 |
attr |
SeedOssAttention.q_size |
1 | 0 | 0 |
attr |
SeedOssAttention.kv_size |
1 | 0 | 0 |
attr |
SeedOssAttention.scaling |
1 | 0 | 0 |
attr |
SeedOssAttention.qkv_proj |
1 | 0 | 0 |
attr |
SeedOssAttention.o_proj |
1 | 0 | 0 |
attr |
SeedOssAttention.rotary_emb |
1 | 0 | 0 |
attr |
SeedOssAttention.attn |
1 | 0 | 0 |
meth |
SeedOssForCausalLM.__init__ |
3 | 2 | 0 |
attr |
SeedOssForCausalLM.config |
1 | 0 | 0 |
attr |
SeedOssForCausalLM.quant_config |
1 | 0 | 0 |
attr |
SeedOssForCausalLM.model |
1 | 0 | 0 |
attr |
SeedOssForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
SeedOssForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
SeedOssForCausalLM.lm_head |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
SeedOssModel.__init__ |
4 | 3 | 0 |
attr |
SeedOssModel.config |
1 | 0 | 0 |
attr |
SeedOssModel.quant_config |
1 | 0 | 0 |
attr |
SeedOssModel.vocab_size |
1 | 0 | 0 |
attr |
SeedOssModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
SeedOssModel.embed_tokens |
1 | 0 | 0 |
attr |
SeedOssModel.norm |
1 | 0 | 0 |
vllm.model_executor.models.siglip (69 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SiglipProcessingInfo.get_hf_config |
1 | 0 | 0 |
meth |
SiglipProcessingInfo.get_vision_encoder_info |
1 | 0 | 0 |
meth |
SiglipProcessingInfo.get_hf_processor |
2 | 1 | 0 |
attr |
SiglipAttention.config |
1 | 0 | 0 |
attr |
SiglipAttention.embed_dim |
1 | 0 | 0 |
attr |
SiglipAttention.num_heads |
1 | 0 | 0 |
attr |
SiglipAttention.head_dim |
1 | 0 | 0 |
attr |
SiglipAttention.scale |
1 | 0 | 0 |
attr |
SiglipAttention.qkv_proj |
1 | 0 | 0 |
attr |
SiglipAttention.out_proj |
1 | 0 | 0 |
attr |
SiglipAttention.tp_size |
1 | 0 | 0 |
attr |
SiglipAttention.num_heads_per_partition |
1 | 0 | 0 |
attr |
SiglipAttention.attn |
1 | 0 | 0 |
meth |
SiglipEmbeddingModel.__init__ |
3 | 2 | 0 |
meth |
SiglipEmbeddingModel.load_weights |
2 | 1 | 0 |
attr |
SiglipEmbeddingModel.config |
1 | 0 | 0 |
attr |
SiglipEmbeddingModel.text_embed_dim |
1 | 0 | 0 |
attr |
SiglipEmbeddingModel.vision_embed_dim |
1 | 0 | 0 |
attr |
SiglipEmbeddingModel.text_projection_size |
1 | 0 | 0 |
attr |
SiglipEmbeddingModel.pooler_config |
1 | 0 | 0 |
attr |
SiglipEmbeddingModel.pooler |
1 | 0 | 0 |
attr |
SiglipEmbeddingModel.text_model |
1 | 0 | 0 |
attr |
SiglipEmbeddingModel.vision_model |
1 | 0 | 0 |
meth |
SiglipVisionEmbeddings.__init__ |
2 | 1 | 0 |
attr |
SiglipVisionEmbeddings.config |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.embed_dim |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.image_size |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.patch_size |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.patch_embedding |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.num_patches |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.num_positions |
1 | 0 | 0 |
attr |
SiglipVisionEmbeddings.position_embedding |
1 | 0 | 0 |
attr |
SiglipEncoder.config |
1 | 0 | 0 |
attr |
SiglipEncoder.layers |
1 | 0 | 0 |
attr |
SiglipTextTransformer.config |
1 | 0 | 0 |
attr |
SiglipTextTransformer.embeddings |
1 | 0 | 0 |
attr |
SiglipTextTransformer.encoder |
1 | 0 | 0 |
attr |
SiglipTextTransformer.final_layer_norm |
1 | 0 | 0 |
attr |
SiglipTextTransformer.head |
1 | 0 | 0 |
prop |
SiglipVisionModel.dtype |
1 | 0 | 0 |
prop |
SiglipVisionModel.device |
1 | 0 | 0 |
attr |
SiglipVisionModel.quant_config |
1 | 0 | 0 |
attr |
SiglipVisionModel.vision_model |
1 | 0 | 0 |
attr |
SiglipEncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
SiglipEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
SiglipEncoderLayer.layer_norm1 |
1 | 0 | 0 |
attr |
SiglipEncoderLayer.mlp |
1 | 0 | 0 |
attr |
SiglipEncoderLayer.layer_norm2 |
1 | 0 | 0 |
prop |
SiglipVisionTransformer.dtype |
1 | 0 | 0 |
prop |
SiglipVisionTransformer.device |
1 | 0 | 0 |
attr |
SiglipVisionTransformer.config |
1 | 0 | 0 |
attr |
SiglipVisionTransformer.embeddings |
1 | 0 | 0 |
attr |
SiglipVisionTransformer.encoder |
1 | 0 | 0 |
attr |
SiglipVisionTransformer.head |
1 | 0 | 0 |
attr |
SiglipVisionTransformer.last_hs_proc |
1 | 0 | 0 |
attr |
SiglipVisionTransformer.post_layernorm |
1 | 0 | 0 |
attr |
SiglipVisionTransformer.use_head |
1 | 0 | 0 |
attr |
SiglipMLP.config |
1 | 0 | 0 |
attr |
SiglipMLP.activation_fn |
1 | 0 | 0 |
attr |
SiglipMLP.fc1 |
1 | 0 | 0 |
attr |
SiglipMLP.fc2 |
1 | 0 | 0 |
meth |
SiglipTextEmbeddings.__init__ |
2 | 1 | 0 |
attr |
SiglipTextEmbeddings.config |
1 | 0 | 0 |
attr |
SiglipTextEmbeddings.token_embedding |
1 | 0 | 0 |
attr |
SiglipTextEmbeddings.position_embedding |
1 | 0 | 0 |
attr |
SiglipMultiheadAttentionPoolingHead.probe |
1 | 0 | 0 |
attr |
SiglipMultiheadAttentionPoolingHead.attention |
1 | 0 | 0 |
attr |
SiglipMultiheadAttentionPoolingHead.layernorm |
1 | 0 | 0 |
attr |
SiglipMultiheadAttentionPoolingHead.mlp |
1 | 0 | 0 |
vllm.model_executor.models.siglip2navit (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Siglip2VisionEmbeddings.__init__ |
2 | 1 | 0 |
attr |
Siglip2VisionEmbeddings.config |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.embed_dim |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.patch_size |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.image_size |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.num_patches |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.preserve_original_pe |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.hidden_stride |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.patch_embedding |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.position_embedding_size |
1 | 0 | 0 |
attr |
Siglip2VisionEmbeddings.position_embedding |
1 | 0 | 0 |
meth |
Siglip2Encoder.__init__ |
4 | 3 | 0 |
meth |
Siglip2Encoder.rot_pos_emb |
2 | 0 | 0 |
meth |
Siglip2Encoder.get_window_index |
2 | 0 | 0 |
attr |
Siglip2Encoder.config |
1 | 0 | 0 |
attr |
Siglip2Encoder.layers |
1 | 0 | 0 |
attr |
Siglip2Encoder.rotary_pos_emb |
1 | 0 | 0 |
attr |
Siglip2Encoder.patch_size |
1 | 0 | 0 |
attr |
Siglip2Encoder.hidden_stride |
1 | 0 | 0 |
attr |
Siglip2Encoder.window_size |
1 | 0 | 0 |
attr |
Siglip2Encoder.spatial_merge_unit |
1 | 0 | 0 |
attr |
Siglip2Encoder.fullatt_block_indexes |
1 | 0 | 0 |
meth |
Siglip2MLP.__init__ |
4 | 3 | 0 |
attr |
Siglip2MLP.config |
1 | 0 | 0 |
attr |
Siglip2MLP.activation_fn |
1 | 0 | 0 |
attr |
Siglip2MLP.fc1 |
1 | 0 | 0 |
attr |
Siglip2MLP.fc2 |
1 | 0 | 0 |
meth |
Siglip2Attention.__init__ |
4 | 3 | 0 |
attr |
Siglip2Attention.config |
1 | 0 | 0 |
attr |
Siglip2Attention.embed_dim |
1 | 0 | 0 |
attr |
Siglip2Attention.num_heads |
1 | 0 | 0 |
attr |
Siglip2Attention.head_dim |
1 | 0 | 0 |
attr |
Siglip2Attention.scale |
1 | 0 | 0 |
attr |
Siglip2Attention.dropout |
1 | 0 | 0 |
attr |
Siglip2Attention.qkv_proj |
1 | 0 | 0 |
attr |
Siglip2Attention.out_proj |
1 | 0 | 0 |
attr |
Siglip2Attention.tp_size |
1 | 0 | 0 |
attr |
Siglip2Attention.num_heads_per_partition |
1 | 0 | 0 |
attr |
Siglip2Attention.use_rope |
1 | 0 | 0 |
attr |
Siglip2Attention.attn |
1 | 0 | 0 |
attr |
Siglip2Attention.apply_rotary_emb |
1 | 0 | 0 |
meth |
Siglip2EncoderLayer.__init__ |
4 | 3 | 0 |
attr |
Siglip2EncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
Siglip2EncoderLayer.layer_norm1 |
1 | 0 | 0 |
attr |
Siglip2EncoderLayer.self_attn |
1 | 0 | 0 |
attr |
Siglip2EncoderLayer.layer_norm2 |
1 | 0 | 0 |
attr |
Siglip2EncoderLayer.mlp |
1 | 0 | 0 |
meth |
Siglip2NavitModel.__init__ |
4 | 3 | 0 |
attr |
Siglip2NavitModel.vision_model |
1 | 0 | 0 |
meth |
Siglip2VisionTransformer.__init__ |
4 | 3 | 0 |
attr |
Siglip2VisionTransformer.config |
1 | 0 | 0 |
attr |
Siglip2VisionTransformer.embeddings |
1 | 0 | 0 |
attr |
Siglip2VisionTransformer.encoder |
1 | 0 | 0 |
attr |
Siglip2VisionTransformer.post_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.skyworkr1v (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
build_transform |
2 | 1 | 0 |
meth |
SkyworkR1VChatModel._patch_quant_config |
3 | 2 | 0 |
meth |
SkyworkR1VChatModel._init_vision_model |
5 | 4 | 0 |
meth |
SkyworkR1VChatModel.pixel_shuffle |
3 | 0 | 0 |
attr |
SkyworkR1VChatModel.config |
1 | 0 | 0 |
attr |
SkyworkR1VChatModel.multimodal_config |
1 | 0 | 0 |
attr |
SkyworkR1VChatModel.patch_size |
1 | 0 | 0 |
attr |
SkyworkR1VChatModel.num_image_token |
1 | 0 | 0 |
attr |
SkyworkR1VChatModel.downsample_ratio |
1 | 0 | 0 |
attr |
SkyworkR1VChatModel.ps_version |
1 | 0 | 0 |
attr |
SkyworkR1VChatModel.is_mono |
1 | 0 | 0 |
attr |
SkyworkR1VChatModel.img_context_token_id |
1 | 0 | 0 |
attr |
SkyworkR1VChatModel.visual_token_mask |
1 | 0 | 0 |
attr |
SkyworkR1VChatModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
SkyworkR1VChatModel.vision_model |
1 | 0 | 0 |
attr |
SkyworkR1VChatModel.mlp1 |
1 | 0 | 0 |
attr |
SkyworkR1VChatModel.language_model |
1 | 0 | 0 |
attr |
SkyworkR1VProcessor.config |
1 | 0 | 0 |
attr |
SkyworkR1VProcessor.tokenizer |
1 | 0 | 0 |
attr |
SkyworkR1VProcessor.num_image_token |
1 | 0 | 0 |
attr |
SkyworkR1VProcessor.image_size |
1 | 0 | 0 |
attr |
SkyworkR1VProcessor.min_dynamic_patch |
1 | 0 | 0 |
attr |
SkyworkR1VProcessor.max_dynamic_patch |
1 | 0 | 0 |
attr |
SkyworkR1VProcessor.dynamic_image_size |
1 | 0 | 0 |
vllm.model_executor.models.smolvlm (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SmolVLMForConditionalGeneration.__init__ |
3 | 2 | 0 |
vllm.model_executor.models.solar (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SolarForCausalLM.__init__ |
3 | 2 | 0 |
attr |
SolarForCausalLM.config |
1 | 0 | 0 |
attr |
SolarForCausalLM.quant_config |
1 | 0 | 0 |
attr |
SolarForCausalLM.model |
1 | 0 | 0 |
attr |
SolarForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
SolarForCausalLM.lm_head |
1 | 0 | 0 |
attr |
SolarForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
SolarDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
SolarDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
SolarDecoderLayer.mlp |
1 | 0 | 0 |
attr |
SolarDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
SolarDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
SolarModel.__init__ |
3 | 2 | 0 |
attr |
SolarModel.config |
1 | 0 | 0 |
attr |
SolarModel.quant_config |
1 | 0 | 0 |
attr |
SolarModel.vocab_size |
1 | 0 | 0 |
attr |
SolarModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
SolarModel.embed_tokens |
1 | 0 | 0 |
attr |
SolarModel.norm |
1 | 0 | 0 |
attr |
SolarAttention.hidden_size |
1 | 0 | 0 |
attr |
SolarAttention.total_num_heads |
1 | 0 | 0 |
attr |
SolarAttention.num_heads |
1 | 0 | 0 |
attr |
SolarAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
SolarAttention.num_kv_heads |
1 | 0 | 0 |
attr |
SolarAttention.head_dim |
1 | 0 | 0 |
attr |
SolarAttention.q_size |
1 | 0 | 0 |
attr |
SolarAttention.kv_size |
1 | 0 | 0 |
attr |
SolarAttention.scaling |
1 | 0 | 0 |
attr |
SolarAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
SolarAttention.qkv_proj |
1 | 0 | 0 |
attr |
SolarAttention.o_proj |
1 | 0 | 0 |
attr |
SolarAttention.rotary_emb |
1 | 0 | 0 |
attr |
SolarAttention.attn |
1 | 0 | 0 |
meth |
SolarMLP.forward |
2 | 0 | 0 |
attr |
SolarMLP.gate_up_proj |
1 | 0 | 0 |
attr |
SolarMLP.down_proj |
1 | 0 | 0 |
attr |
SolarMLP.act_fn |
1 | 0 | 0 |
vllm.model_executor.models.stablelm (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
StablelmAttention.config |
1 | 0 | 0 |
attr |
StablelmAttention.hidden_size |
1 | 0 | 0 |
attr |
StablelmAttention.total_num_heads |
1 | 0 | 0 |
attr |
StablelmAttention.num_heads |
1 | 0 | 0 |
attr |
StablelmAttention.total_num_key_value_heads |
1 | 0 | 0 |
attr |
StablelmAttention.num_key_value_heads |
1 | 0 | 0 |
attr |
StablelmAttention.head_dim |
1 | 0 | 0 |
attr |
StablelmAttention.max_position_embeddings |
1 | 0 | 0 |
attr |
StablelmAttention.scaling |
1 | 0 | 0 |
attr |
StablelmAttention.q_size |
1 | 0 | 0 |
attr |
StablelmAttention.kv_size |
1 | 0 | 0 |
attr |
StablelmAttention.qkv_bias |
1 | 0 | 0 |
attr |
StablelmAttention.qkv_proj |
1 | 0 | 0 |
attr |
StablelmAttention.o_proj |
1 | 0 | 0 |
attr |
StablelmAttention.rotary_emb |
1 | 0 | 0 |
attr |
StablelmAttention.attn |
1 | 0 | 0 |
attr |
StablelmMLP.config |
1 | 0 | 0 |
attr |
StablelmMLP.hidden_size |
1 | 0 | 0 |
attr |
StablelmMLP.intermediate_size |
1 | 0 | 0 |
attr |
StablelmMLP.gate_up_proj |
1 | 0 | 0 |
attr |
StablelmMLP.down_proj |
1 | 0 | 0 |
attr |
StablelmMLP.act_fn |
1 | 0 | 0 |
meth |
StableLMEpochModel.__init__ |
3 | 2 | 0 |
attr |
StableLMEpochModel.embed_tokens |
1 | 0 | 0 |
attr |
StableLMEpochModel.norm |
1 | 0 | 0 |
attr |
StableLMEpochModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
StablelmDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
StablelmDecoderLayer.mlp |
1 | 0 | 0 |
attr |
StablelmDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
StablelmDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
StablelmForCausalLM.__init__ |
3 | 2 | 0 |
attr |
StablelmForCausalLM.config |
1 | 0 | 0 |
attr |
StablelmForCausalLM.quant_config |
1 | 0 | 0 |
attr |
StablelmForCausalLM.model |
1 | 0 | 0 |
attr |
StablelmForCausalLM.lm_head |
1 | 0 | 0 |
attr |
StablelmForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
StablelmForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.starcoder2 (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Starcoder2ForCausalLM.__init__ |
3 | 2 | 0 |
attr |
Starcoder2ForCausalLM.config |
1 | 0 | 0 |
attr |
Starcoder2ForCausalLM.model |
1 | 0 | 0 |
attr |
Starcoder2ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Starcoder2ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Starcoder2ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Starcoder2ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Starcoder2Model.__init__ |
3 | 2 | 0 |
attr |
Starcoder2Model.config |
1 | 0 | 0 |
attr |
Starcoder2Model.vocab_size |
1 | 0 | 0 |
attr |
Starcoder2Model.embed_tokens |
1 | 0 | 0 |
attr |
Starcoder2Model.norm |
1 | 0 | 0 |
attr |
Starcoder2Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
meth |
Starcoder2Attention.__init__ |
5 | 4 | 0 |
attr |
Starcoder2Attention.config |
1 | 0 | 0 |
attr |
Starcoder2Attention.hidden_size |
1 | 0 | 0 |
attr |
Starcoder2Attention.total_num_heads |
1 | 0 | 0 |
attr |
Starcoder2Attention.num_heads |
1 | 0 | 0 |
attr |
Starcoder2Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Starcoder2Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Starcoder2Attention.head_dim |
1 | 0 | 0 |
attr |
Starcoder2Attention.q_size |
1 | 0 | 0 |
attr |
Starcoder2Attention.kv_size |
1 | 0 | 0 |
attr |
Starcoder2Attention.scaling |
1 | 0 | 0 |
attr |
Starcoder2Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
Starcoder2Attention.use_bias |
1 | 0 | 0 |
attr |
Starcoder2Attention.qkv_proj |
1 | 0 | 0 |
attr |
Starcoder2Attention.o_proj |
1 | 0 | 0 |
attr |
Starcoder2Attention.rotary_emb |
1 | 0 | 0 |
attr |
Starcoder2Attention.attn |
1 | 0 | 0 |
meth |
Starcoder2MLP.__init__ |
4 | 3 | 0 |
attr |
Starcoder2MLP.c_fc |
1 | 0 | 0 |
attr |
Starcoder2MLP.c_proj |
1 | 0 | 0 |
attr |
Starcoder2MLP.act |
1 | 0 | 0 |
meth |
Starcoder2DecoderLayer.__init__ |
5 | 4 | 0 |
attr |
Starcoder2DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Starcoder2DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Starcoder2DecoderLayer.mlp |
1 | 0 | 0 |
attr |
Starcoder2DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Starcoder2DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
vllm.model_executor.models.step1 (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Step1ForCausalLM.__init__ |
3 | 2 | 0 |
attr |
Step1ForCausalLM.config |
1 | 0 | 0 |
attr |
Step1ForCausalLM.quant_config |
1 | 0 | 0 |
attr |
Step1ForCausalLM.model |
1 | 0 | 0 |
attr |
Step1ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Step1ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Step1ForCausalLM.logits_processor |
1 | 0 | 0 |
meth |
StepDecoderModel.__init__ |
3 | 2 | 0 |
attr |
StepDecoderModel.config |
1 | 0 | 0 |
attr |
StepDecoderModel.quant_config |
1 | 0 | 0 |
attr |
StepDecoderModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
StepDecoderModel.embed_tokens |
1 | 0 | 0 |
attr |
StepDecoderModel.norm |
1 | 0 | 0 |
meth |
StepDecoderLayer.__init__ |
3 | 2 | 0 |
attr |
StepDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
StepDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
StepDecoderLayer.mlp |
1 | 0 | 0 |
attr |
StepDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
StepDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
meth |
StepAttention.__init__ |
5 | 3 | 0 |
attr |
StepAttention.hidden_size |
1 | 0 | 0 |
attr |
StepAttention.total_num_heads |
1 | 0 | 0 |
attr |
StepAttention.num_heads |
1 | 0 | 0 |
attr |
StepAttention.head_dim |
1 | 0 | 0 |
attr |
StepAttention.total_num_kv_heads |
1 | 0 | 0 |
attr |
StepAttention.num_kv_heads |
1 | 0 | 0 |
attr |
StepAttention.qkv_proj |
1 | 0 | 0 |
attr |
StepAttention.q_size |
1 | 0 | 0 |
attr |
StepAttention.kv_size |
1 | 0 | 0 |
attr |
StepAttention.o_proj |
1 | 0 | 0 |
attr |
StepAttention.scale |
1 | 0 | 0 |
attr |
StepAttention.attn |
1 | 0 | 0 |
meth |
StepMLP.__init__ |
6 | 5 | 0 |
attr |
StepMLP.gate_up_proj |
1 | 0 | 0 |
attr |
StepMLP.down_proj |
1 | 0 | 0 |
attr |
StepMLP.act_fn |
1 | 0 | 0 |
vllm.model_executor.models.step3_text (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FusedMoEBlock.__init__ |
4 | 3 | 0 |
attr |
FusedMoEBlock.tp_size |
1 | 0 | 0 |
attr |
FusedMoEBlock.experts |
1 | 0 | 0 |
attr |
FusedMoEBlock.gate |
1 | 0 | 0 |
attr |
Step3TextDecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Step3TextDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Step3TextDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Step3TextDecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Step3TextDecoderLayer.moe |
1 | 0 | 0 |
attr |
Step3TextDecoderLayer.share_expert |
1 | 0 | 0 |
attr |
Step3TextDecoderLayer.use_moe |
1 | 0 | 0 |
attr |
Step3TextDecoderLayer.mlp |
1 | 0 | 0 |
meth |
Step3TextAttention.__init__ |
12 | 11 | 0 |
attr |
Step3TextAttention.hidden_size |
1 | 0 | 0 |
attr |
Step3TextAttention.total_num_heads |
1 | 0 | 0 |
attr |
Step3TextAttention.num_heads |
1 | 0 | 0 |
attr |
Step3TextAttention.num_kv_heads |
1 | 0 | 0 |
attr |
Step3TextAttention.head_dim |
1 | 0 | 0 |
attr |
Step3TextAttention.kv_size |
1 | 0 | 0 |
attr |
Step3TextAttention.q_size |
1 | 0 | 0 |
attr |
Step3TextAttention.qkv_proj |
1 | 0 | 0 |
attr |
Step3TextAttention.o_proj |
1 | 0 | 0 |
attr |
Step3TextAttention.inter_norm |
1 | 0 | 0 |
attr |
Step3TextAttention.wq |
1 | 0 | 0 |
attr |
Step3TextAttention.rotary_emb |
1 | 0 | 0 |
attr |
Step3TextAttention.attn |
1 | 0 | 0 |
attr |
Step3TextModel.vocab_size |
1 | 0 | 0 |
attr |
Step3TextModel.config |
1 | 0 | 0 |
attr |
Step3TextModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Step3TextModel.embed_tokens |
1 | 0 | 0 |
attr |
Step3TextModel.norm |
1 | 0 | 0 |
attr |
Step3TextMLP.gate_up_proj |
1 | 0 | 0 |
attr |
Step3TextMLP.down_proj |
1 | 0 | 0 |
attr |
Step3TextMLP.act_fn |
1 | 0 | 0 |
attr |
Step3TextMLP.hidden_size |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
Step3TextForCausalLM.__init__ |
3 | 2 | 0 |
meth |
Step3TextForCausalLM.forward |
5 | 4 | 0 |
attr |
Step3TextForCausalLM.config |
1 | 0 | 0 |
attr |
Step3TextForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
Step3TextForCausalLM.model |
1 | 0 | 0 |
attr |
Step3TextForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Step3TextForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Step3TextForCausalLM.logits_processor |
1 | 0 | 0 |
vllm.model_executor.models.step3_vl (87 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Step3VisionEmbeddings.__init__ |
2 | 1 | 0 |
attr |
Step3VisionEmbeddings.config |
1 | 0 | 0 |
attr |
Step3VisionEmbeddings.embed_dim |
1 | 0 | 0 |
attr |
Step3VisionEmbeddings.image_size |
1 | 0 | 0 |
attr |
Step3VisionEmbeddings.patch_size |
1 | 0 | 0 |
attr |
Step3VisionEmbeddings.class_embedding |
1 | 0 | 0 |
attr |
Step3VisionEmbeddings.patch_embedding |
1 | 0 | 0 |
attr |
Step3VisionEmbeddings.num_patches |
1 | 0 | 0 |
attr |
Step3VisionEmbeddings.pad_tp_size |
1 | 0 | 0 |
attr |
Step3VisionEmbeddings.position_embedding |
1 | 0 | 0 |
meth |
Step3VisionAttention.__init__ |
4 | 2 | 0 |
meth |
Step3VisionAttention.forward |
2 | 1 | 0 |
attr |
Step3VisionAttention.config |
1 | 0 | 0 |
attr |
Step3VisionAttention.embed_dim |
1 | 0 | 0 |
attr |
Step3VisionAttention.total_num_heads |
1 | 0 | 0 |
attr |
Step3VisionAttention.head_dim |
1 | 0 | 0 |
attr |
Step3VisionAttention.scale |
1 | 0 | 0 |
attr |
Step3VisionAttention.num_heads |
1 | 0 | 0 |
attr |
Step3VisionAttention.q_size |
1 | 0 | 0 |
attr |
Step3VisionAttention.qkv_proj |
1 | 0 | 0 |
attr |
Step3VisionAttention.out_proj |
1 | 0 | 0 |
attr |
Step3VisionAttention.attn |
1 | 0 | 0 |
meth |
Step3VisionMLP.__init__ |
4 | 2 | 0 |
attr |
Step3VisionMLP.config |
1 | 0 | 0 |
attr |
Step3VisionMLP.activation_fn |
1 | 0 | 0 |
attr |
Step3VisionMLP.fc1 |
1 | 0 | 0 |
attr |
Step3VisionMLP.fc2 |
1 | 0 | 0 |
meth |
Step3VisionTransformer.__init__ |
4 | 3 | 0 |
meth |
Step3VisionTransformer.forward |
2 | 1 | 0 |
attr |
Step3VisionTransformer.config |
1 | 0 | 0 |
attr |
Step3VisionTransformer.use_data_parallel |
1 | 0 | 0 |
attr |
Step3VisionTransformer.image_size |
1 | 0 | 0 |
attr |
Step3VisionTransformer.embeddings |
1 | 0 | 0 |
attr |
Step3VisionTransformer.transformer |
1 | 0 | 0 |
meth |
Step3VisionProcessor.__init__ |
4 | 0 | 0 |
meth |
Step3VisionProcessor.__call__ |
3 | 0 | 0 |
attr |
Step3VisionProcessor.transform |
1 | 0 | 0 |
attr |
Step3VisionProcessor.patch_transform |
1 | 0 | 0 |
attr |
Step3VLProcessor.config |
1 | 0 | 0 |
attr |
Step3VLProcessor.tokenizer |
1 | 0 | 0 |
attr |
Step3VLProcessor.image_size |
1 | 0 | 0 |
attr |
Step3VLProcessor.patch_size |
1 | 0 | 0 |
attr |
Step3VLProcessor.image_preprocessor |
1 | 0 | 0 |
attr |
Step3VLProcessor.num_image_feature_size |
1 | 0 | 0 |
attr |
Step3VLProcessor.num_patch_feature_size |
1 | 0 | 0 |
attr |
Step3VLProcessor.image_token |
1 | 0 | 0 |
attr |
Step3VLProcessor.image_feature_placeholder |
1 | 0 | 0 |
attr |
Step3VLProcessor.patch_feature_placeholder |
1 | 0 | 0 |
attr |
Step3VLProcessor.patcher |
1 | 0 | 0 |
meth |
Step3VLForConditionalGeneration.embed_multimodal |
2 | 1 | 0 |
meth |
Step3VLForConditionalGeneration.load_weights |
2 | 1 | 0 |
prop |
Step3VLForConditionalGeneration.device |
1 | 0 | 0 |
prop |
Step3VLForConditionalGeneration.dtype |
1 | 0 | 0 |
attr |
Step3VLForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Step3VLForConditionalGeneration.config |
1 | 0 | 0 |
attr |
Step3VLForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
Step3VLForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
Step3VLForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Step3VLForConditionalGeneration.vision_model |
1 | 0 | 0 |
attr |
Step3VLForConditionalGeneration.vit_downsampler |
1 | 0 | 0 |
attr |
Step3VLForConditionalGeneration.vit_downsampler2 |
1 | 0 | 0 |
attr |
Step3VLForConditionalGeneration.vit_large_projector |
1 | 0 | 0 |
attr |
Step3VLForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
ImagePatcher.get_image_size_for_crop |
4 | 3 | 0 |
meth |
ImagePatcher.patch_crop |
6 | 5 | 0 |
attr |
ImagePatcher.enable_patch |
1 | 0 | 0 |
func |
get_abs_pos |
3 | 0 | 0 |
meth |
Step3VisionEncoderLayer.__init__ |
4 | 3 | 0 |
attr |
Step3VisionEncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
Step3VisionEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
Step3VisionEncoderLayer.layer_norm1 |
1 | 0 | 0 |
attr |
Step3VisionEncoderLayer.mlp |
1 | 0 | 0 |
attr |
Step3VisionEncoderLayer.layer_norm2 |
1 | 0 | 0 |
meth |
Step3VisionEncoder.__init__ |
4 | 3 | 0 |
meth |
Step3VisionEncoder.forward |
2 | 0 | 0 |
attr |
Step3VisionEncoder.config |
1 | 0 | 0 |
attr |
Step3VisionEncoder.layers |
1 | 0 | 0 |
vllm.model_executor.models.step3p5 (87 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Step3p5Attention.__init__ |
21 | 20 | 0 |
attr |
Step3p5Attention.hidden_size |
1 | 0 | 0 |
attr |
Step3p5Attention.total_num_heads |
1 | 0 | 0 |
attr |
Step3p5Attention.layer_idx |
1 | 0 | 0 |
attr |
Step3p5Attention.rank |
1 | 0 | 0 |
attr |
Step3p5Attention.partial_rotary_factor |
1 | 0 | 0 |
attr |
Step3p5Attention.num_heads |
1 | 0 | 0 |
attr |
Step3p5Attention.total_num_kv_heads |
1 | 0 | 0 |
attr |
Step3p5Attention.num_kv_heads |
1 | 0 | 0 |
attr |
Step3p5Attention.head_dim |
1 | 0 | 0 |
attr |
Step3p5Attention.q_size |
1 | 0 | 0 |
attr |
Step3p5Attention.kv_size |
1 | 0 | 0 |
attr |
Step3p5Attention.scaling |
1 | 0 | 0 |
attr |
Step3p5Attention.rope_theta |
1 | 0 | 0 |
attr |
Step3p5Attention.qkv_proj |
1 | 0 | 0 |
attr |
Step3p5Attention.o_proj |
1 | 0 | 0 |
attr |
Step3p5Attention.rotary_emb |
1 | 0 | 0 |
attr |
Step3p5Attention.q_norm |
1 | 0 | 0 |
attr |
Step3p5Attention.k_norm |
1 | 0 | 0 |
attr |
Step3p5Attention.use_head_wise_attn_gate |
1 | 0 | 0 |
attr |
Step3p5Attention.use_rope |
1 | 0 | 0 |
attr |
Step3p5Attention.attn |
1 | 0 | 0 |
attr |
Step3p5Attention.max_position_embeddings |
1 | 0 | 0 |
attr |
Step3p5Attention.rotary_dim |
1 | 0 | 0 |
attr |
Step3p5Attention.g_proj |
1 | 0 | 0 |
meth |
Step3p5ForCausalLM.__init__ |
3 | 2 | 0 |
meth |
Step3p5ForCausalLM.forward |
5 | 4 | 0 |
attr |
Step3p5ForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Step3p5ForCausalLM.model |
1 | 0 | 0 |
attr |
Step3p5ForCausalLM.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Step3p5ForCausalLM.expert_weights |
1 | 0 | 0 |
attr |
Step3p5ForCausalLM.num_moe_layers |
1 | 0 | 0 |
attr |
Step3p5ForCausalLM.num_expert_groups |
1 | 0 | 0 |
attr |
Step3p5ForCausalLM.num_shared_experts |
1 | 0 | 0 |
attr |
Step3p5ForCausalLM.num_logical_experts |
1 | 0 | 0 |
attr |
Step3p5ForCausalLM.num_physical_experts |
1 | 0 | 0 |
attr |
Step3p5ForCausalLM.num_local_physical_experts |
1 | 0 | 0 |
attr |
Step3p5ForCausalLM.num_routed_experts |
1 | 0 | 0 |
attr |
Step3p5ForCausalLM.num_redundant_experts |
1 | 0 | 0 |
attr |
Step3p5ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Step3p5ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Step3p5MLP.gate_up_proj |
1 | 0 | 0 |
attr |
Step3p5MLP.down_proj |
1 | 0 | 0 |
attr |
Step3p5MLP.act_fn |
1 | 0 | 0 |
attr |
Step3p5MLP.prefix |
1 | 0 | 0 |
attr |
Step3p5MLP.hidden_size |
1 | 0 | 0 |
attr |
Step3p5MLP.limit |
1 | 0 | 0 |
attr |
Step3p5DecoderLayer.hidden_size |
1 | 0 | 0 |
attr |
Step3p5DecoderLayer.layer_idx |
1 | 0 | 0 |
attr |
Step3p5DecoderLayer.use_moe |
1 | 0 | 0 |
attr |
Step3p5DecoderLayer.tp_group |
1 | 0 | 0 |
attr |
Step3p5DecoderLayer.use_fused_all_reduce |
1 | 0 | 0 |
attr |
Step3p5DecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Step3p5DecoderLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
Step3p5DecoderLayer.prefix |
1 | 0 | 0 |
attr |
Step3p5DecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Step3p5DecoderLayer.moe |
1 | 0 | 0 |
attr |
Step3p5DecoderLayer.mlp |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
Step3p5Model.vllm_config |
1 | 0 | 0 |
attr |
Step3p5Model.vocab_size |
1 | 0 | 0 |
attr |
Step3p5Model.config |
1 | 0 | 0 |
attr |
Step3p5Model.moe_num_experts |
1 | 0 | 0 |
attr |
Step3p5Model.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
Step3p5Model.embed_tokens |
1 | 0 | 0 |
attr |
Step3p5Model.norm |
1 | 0 | 0 |
meth |
FusedMoEBlock.__init__ |
3 | 2 | 0 |
attr |
FusedMoEBlock.tp_size |
1 | 0 | 0 |
attr |
FusedMoEBlock.layer_idx |
1 | 0 | 0 |
attr |
FusedMoEBlock.ep_size |
1 | 0 | 0 |
attr |
FusedMoEBlock.ep_rank |
1 | 0 | 0 |
attr |
FusedMoEBlock.hidden_size |
1 | 0 | 0 |
attr |
FusedMoEBlock.enable_eplb |
1 | 0 | 0 |
attr |
FusedMoEBlock.n_routed_experts |
1 | 0 | 0 |
attr |
FusedMoEBlock.n_logical_experts |
1 | 0 | 0 |
attr |
FusedMoEBlock.n_redundant_experts |
1 | 0 | 0 |
attr |
FusedMoEBlock.n_physical_experts |
1 | 0 | 0 |
attr |
FusedMoEBlock.n_local_physical_experts |
1 | 0 | 0 |
attr |
FusedMoEBlock.physical_expert_start |
1 | 0 | 0 |
attr |
FusedMoEBlock.physical_expert_end |
1 | 0 | 0 |
attr |
FusedMoEBlock.gate |
1 | 0 | 0 |
attr |
FusedMoEBlock.use_moe_router_bias |
1 | 0 | 0 |
attr |
FusedMoEBlock.routed_scaling_factor |
1 | 0 | 0 |
attr |
FusedMoEBlock.router_bias |
1 | 0 | 0 |
attr |
FusedMoEBlock.need_fp32_gate |
1 | 0 | 0 |
attr |
FusedMoEBlock.share_expert |
1 | 0 | 0 |
attr |
FusedMoEBlock.experts |
1 | 0 | 0 |
vllm.model_executor.models.step3p5_mtp (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Step3p5AMultiTokenPredictor.__init__ |
3 | 2 | 0 |
attr |
Step3p5AMultiTokenPredictor.embed_tokens |
1 | 0 | 0 |
attr |
Step3p5AMultiTokenPredictor.mtp_start_layer_idx |
1 | 0 | 0 |
attr |
Step3p5AMultiTokenPredictor.num_mtp_layers |
1 | 0 | 0 |
attr |
Step3p5AMultiTokenPredictor.layers |
1 | 0 | 0 |
attr |
Step3p5AMultiTokenPredictor.logits_processor |
1 | 0 | 0 |
attr |
Step3p5AMultiTokenPredictorLayer.enorm |
1 | 0 | 0 |
attr |
Step3p5AMultiTokenPredictorLayer.hnorm |
1 | 0 | 0 |
attr |
Step3p5AMultiTokenPredictorLayer.eh_proj |
1 | 0 | 0 |
attr |
Step3p5AMultiTokenPredictorLayer.shared_head |
1 | 0 | 0 |
attr |
Step3p5AMultiTokenPredictorLayer.mtp_block |
1 | 0 | 0 |
attr |
SharedHead.norm |
1 | 0 | 0 |
attr |
SharedHead.head |
1 | 0 | 0 |
meth |
Step3p5MTP.__init__ |
3 | 2 | 0 |
attr |
Step3p5MTP.config |
1 | 0 | 0 |
attr |
Step3p5MTP.vllm_config |
1 | 0 | 0 |
attr |
Step3p5MTP.model |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.step_vl (88 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
apply_rotary_emb |
6 | 0 | 0 |
meth |
PerceptionEncoderVisionBlock.__init__ |
12 | 11 | 0 |
meth |
PerceptionEncoderVisionBlock.forward |
3 | 2 | 0 |
attr |
PerceptionEncoderVisionBlock.attn |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionBlock.ls_1 |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionBlock.ls_2 |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionBlock.ln_1 |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionBlock.ln_2 |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionBlock.mlp |
1 | 0 | 0 |
meth |
PerceptionEncoderRope2D.__init__ |
9 | 4 | 0 |
meth |
PerceptionEncoderRope2D._compute_freqs |
3 | 2 | 0 |
meth |
PerceptionEncoderRope2D.forward |
4 | 3 | 0 |
attr |
PerceptionEncoderRope2D.dim |
1 | 0 | 0 |
attr |
PerceptionEncoderRope2D.max_grid_height |
1 | 0 | 0 |
attr |
PerceptionEncoderRope2D.max_grid_width |
1 | 0 | 0 |
attr |
PerceptionEncoderRope2D.use_cls_token |
1 | 0 | 0 |
attr |
PerceptionEncoderRope2D.theta |
1 | 0 | 0 |
attr |
PerceptionEncoderRope2D.max_freq |
1 | 0 | 0 |
attr |
PerceptionEncoderRope2D.num_freqs |
1 | 0 | 0 |
meth |
PerceptionEncoder.__init__ |
6 | 4 | 0 |
meth |
PerceptionEncoder.sample_abs_posemb |
3 | 2 | 0 |
meth |
PerceptionEncoder.forward_features |
2 | 1 | 0 |
meth |
PerceptionEncoder.forward |
2 | 1 | 0 |
attr |
PerceptionEncoder.patch_size |
1 | 0 | 0 |
attr |
PerceptionEncoder.output_dim |
1 | 0 | 0 |
attr |
PerceptionEncoder.heads |
1 | 0 | 0 |
attr |
PerceptionEncoder.width |
1 | 0 | 0 |
attr |
PerceptionEncoder.layers |
1 | 0 | 0 |
attr |
PerceptionEncoder.use_abs_posemb |
1 | 0 | 0 |
attr |
PerceptionEncoder.use_cls_token |
1 | 0 | 0 |
attr |
PerceptionEncoder.use_rope2d |
1 | 0 | 0 |
attr |
PerceptionEncoder.image_size |
1 | 0 | 0 |
attr |
PerceptionEncoder.conv1 |
1 | 0 | 0 |
attr |
PerceptionEncoder.ln_pre |
1 | 0 | 0 |
attr |
PerceptionEncoder.ln_post |
1 | 0 | 0 |
attr |
PerceptionEncoder.transformer |
1 | 0 | 0 |
attr |
PerceptionEncoder.vit_downsampler1 |
1 | 0 | 0 |
attr |
PerceptionEncoder.vit_downsampler2 |
1 | 0 | 0 |
attr |
PerceptionEncoder.class_embedding |
1 | 0 | 0 |
attr |
PerceptionEncoder.posemb_grid_size |
1 | 0 | 0 |
attr |
PerceptionEncoder.positional_embedding |
1 | 0 | 0 |
meth |
PerceptionEncoderLayerScale.__init__ |
4 | 0 | 0 |
meth |
PerceptionEncoderLayerScale.forward |
2 | 0 | 0 |
attr |
PerceptionEncoderLayerScale.inplace |
1 | 0 | 0 |
attr |
PerceptionEncoderLayerScale.gamma |
1 | 0 | 0 |
meth |
PerceptionEncoderVisionTransformer.__init__ |
13 | 12 | 0 |
meth |
PerceptionEncoderVisionTransformer.forward |
3 | 2 | 0 |
attr |
PerceptionEncoderVisionTransformer.width |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionTransformer.layers |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionTransformer.resblocks |
1 | 0 | 0 |
meth |
PerceptionEncoderMLP.__init__ |
6 | 5 | 0 |
attr |
PerceptionEncoderMLP.fc1 |
1 | 0 | 0 |
attr |
PerceptionEncoderMLP.activation |
1 | 0 | 0 |
attr |
PerceptionEncoderMLP.fc2 |
1 | 0 | 0 |
attr |
StepVLForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
StepVLForConditionalGeneration.config |
1 | 0 | 0 |
attr |
StepVLForConditionalGeneration.multimodal_config |
1 | 0 | 0 |
attr |
StepVLForConditionalGeneration.use_data_parallel |
1 | 0 | 0 |
attr |
StepVLForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
StepVLForConditionalGeneration.vision_model |
1 | 0 | 0 |
attr |
StepVLForConditionalGeneration.vit_large_projector |
1 | 0 | 0 |
attr |
StepVLForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
PerceptionEncoderVisionAttention.__init__ |
8 | 7 | 0 |
attr |
PerceptionEncoderVisionAttention.embed_dim |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionAttention.total_num_heads |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionAttention.head_dim |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionAttention.scale |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionAttention.num_heads |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionAttention.qkv_proj |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionAttention.out_proj |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionAttention.attn |
1 | 0 | 0 |
attr |
PerceptionEncoderVisionAttention.rope |
1 | 0 | 0 |
func |
rotate_half |
2 | 0 | 0 |
vllm.model_executor.models.tarsier (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TarsierProcessor.__call__ |
6 | 4 | 0 |
meth |
TarsierMultiModalProjector.__init__ |
7 | 6 | 0 |
attr |
TarsierMultiModalProjector.linear_1 |
1 | 0 | 0 |
attr |
TarsierMultiModalProjector.act |
1 | 0 | 0 |
attr |
TarsierMultiModalProjector.linear_2 |
1 | 0 | 0 |
attr |
TarsierForConditionalGeneration.config |
1 | 0 | 0 |
attr |
TarsierForConditionalGeneration.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
TarsierForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
TarsierForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
TarsierForConditionalGeneration.language_model |
1 | 0 | 0 |
vllm.model_executor.models.telechat2 (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TeleChat2Model.__init__ |
3 | 2 | 0 |
meth |
TeleChat2ForCausalLM._init_model |
4 | 3 | 0 |
attr |
TeleChat2ForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
vllm.model_executor.models.teleflm (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TeleFLMForCausalLM.__init__ |
3 | 2 | 0 |
attr |
TeleFLMForCausalLM.use_mup |
1 | 0 | 0 |
attr |
TeleFLMForCausalLM.mup_scale_factor |
1 | 0 | 0 |
attr |
TeleFLMForCausalLM.output_mult |
1 | 0 | 0 |
attr |
TeleFLMForCausalLM.logits_processor |
1 | 0 | 0 |
meth |
TeleFLMModel.__init__ |
4 | 3 | 0 |
attr |
TeleFLMModel.use_mup |
1 | 0 | 0 |
attr |
TeleFLMModel.input_mult |
1 | 0 | 0 |
vllm.model_executor.models.terratorch (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TerratorchProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
TerratorchMultiModalDataParser.__init__ |
4 | 1 | 0 |
attr |
TerratorchMultiModalDataParser.input_definition |
1 | 0 | 0 |
meth |
Terratorch.__init__ |
3 | 2 | 0 |
meth |
Terratorch.forward |
6 | 5 | 0 |
attr |
Terratorch.inference_runner |
1 | 0 | 0 |
attr |
Terratorch.model |
1 | 0 | 0 |
attr |
Terratorch.pooler |
1 | 0 | 0 |
meth |
TerratorchInputBuilder.__init__ |
2 | 1 | 0 |
attr |
TerratorchInputBuilder.dummy_data_generator |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.transformers.base (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
vllm_flash_attention_forward |
9 | 7 | 0 |
meth |
Base.__init_subclass__ |
3 | 0 | 0 |
meth |
Base.__init__ |
3 | 2 | 0 |
meth |
Base.pipeline_parallel |
1 | 0 | 0 |
meth |
Base.recursive_replace |
1 | 0 | 0 |
meth |
Base.init_parameters |
3 | 2 | 0 |
meth |
Base.forward |
6 | 5 | 0 |
meth |
Base.check_version |
3 | 2 | 0 |
attr |
Base.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Base.config |
1 | 0 | 0 |
attr |
Base.text_config |
1 | 0 | 0 |
attr |
Base.cache_config |
1 | 0 | 0 |
attr |
Base.device_config |
1 | 0 | 0 |
attr |
Base.model_config |
1 | 0 | 0 |
attr |
Base.parallel_config |
1 | 0 | 0 |
attr |
Base.quant_config |
1 | 0 | 0 |
attr |
Base.pp_group |
1 | 0 | 0 |
attr |
Base.tp_group |
1 | 0 | 0 |
attr |
Base.attention_instances |
1 | 0 | 0 |
attr |
Base.embed_scale |
1 | 0 | 0 |
attr |
Base.make_empty_intermediate_tensors |
1 | 0 | 0 |
vllm.model_executor.models.transformers.causal (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CausalMixin.__init__ |
3 | 2 | 0 |
attr |
CausalMixin.lm_head |
1 | 0 | 0 |
attr |
CausalMixin.logits_processor |
1 | 0 | 0 |
vllm.model_executor.models.transformers.legacy (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LegacyMixin.__init__ |
3 | 2 | 0 |
attr |
LegacyMixin.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
LegacyMixin.is_roberta |
1 | 0 | 0 |
attr |
LegacyMixin.padding_idx |
1 | 0 | 0 |
vllm.model_executor.models.transformers.moe (7 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TransformersFusedMoE.__init__ |
3 | 0 | 0 |
meth |
TransformersFusedMoE.forward |
5 | 5 | 1 |
meth |
MoEMixin.__init__ |
3 | 2 | 0 |
meth |
MoEMixin.set_eplb_state |
4 | 3 | 0 |
meth |
MoEMixin.update_physical_experts_metadata |
3 | 2 | 0 |
meth |
MoEMixin.recursive_replace |
1 | 0 | 0 |
vllm.model_executor.models.transformers.multimodal (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MultiModalMixin.__init__ |
3 | 2 | 0 |
meth |
MultiModalMixin.embed_multimodal |
2 | 0 | 0 |
attr |
MultiModalMixin.hf_to_vllm_mapper |
1 | 0 | 0 |
meth |
MultiModalProcessor._get_prompt_updates |
4 | 3 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
MultiModalProcessingInfo.get_supported_mm_limits |
1 | 0 | 0 |
meth |
MultiModalProcessingInfo.get_mm_max_tokens_per_item |
3 | 0 | 0 |
meth |
MultiModalProcessingInfo.get_max_image_size |
1 | 0 | 0 |
vllm.model_executor.models.transformers.pooling (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SequenceClassificationMixin.__init__ |
3 | 2 | 0 |
attr |
SequenceClassificationMixin.classifier |
1 | 0 | 0 |
attr |
SequenceClassificationMixin.pooler |
1 | 0 | 0 |
meth |
EmbeddingMixin.__init__ |
3 | 2 | 0 |
attr |
EmbeddingMixin.pooler |
1 | 0 | 0 |
vllm.model_executor.models.transformers.utils (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
init_on_device_without_buffers |
2 | 1 | 0 |
func |
log_replacement |
4 | 3 | 0 |
vllm.model_executor.models.ultravox (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ModifiedWhisperEncoder.__init__ |
3 | 0 | 0 |
meth |
ModifiedWhisperEncoder.get_attention_mask_by_audio_len |
3 | 2 | 0 |
meth |
ModifiedWhisperEncoder.forward |
3 | 2 | 0 |
prop |
ModifiedWhisperEncoder.max_context_length |
1 | 0 | 0 |
meth |
UltravoxProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
StackAudioFrames.__init__ |
2 | 1 | 0 |
attr |
StackAudioFrames.stack_factor |
1 | 0 | 0 |
meth |
UltravoxTransformerProjector.__init__ |
2 | 1 | 0 |
attr |
UltravoxTransformerProjector.config |
1 | 0 | 0 |
attr |
UltravoxTransformerProjector.ln_pre |
1 | 0 | 0 |
attr |
UltravoxTransformerProjector.linear_in |
1 | 0 | 0 |
attr |
UltravoxTransformerProjector.embed_positions |
1 | 0 | 0 |
attr |
UltravoxTransformerProjector.layers |
1 | 0 | 0 |
attr |
UltravoxTransformerProjector.ln_post |
1 | 0 | 0 |
attr |
UltravoxTransformerProjector.linear_out |
1 | 0 | 0 |
meth |
UltravoxModel.__init__ |
3 | 2 | 0 |
meth |
UltravoxModel.forward |
6 | 5 | 0 |
attr |
UltravoxModel.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
UltravoxModel.config |
1 | 0 | 0 |
attr |
UltravoxModel.multi_modal_config |
1 | 0 | 0 |
attr |
UltravoxModel.secondary_weights |
1 | 0 | 0 |
attr |
UltravoxModel.make_empty_intermediate_tensors |
1 | 0 | 0 |
attr |
UltravoxModel.audio_tower |
1 | 0 | 0 |
attr |
UltravoxModel.language_model |
1 | 0 | 0 |
attr |
UltravoxModel.multi_modal_projector |
1 | 0 | 0 |
meth |
UltravoxFeedForwardProjector.__init__ |
2 | 1 | 0 |
attr |
UltravoxFeedForwardProjector.hidden_dim |
1 | 0 | 0 |
attr |
UltravoxFeedForwardProjector.ln_pre |
1 | 0 | 0 |
attr |
UltravoxFeedForwardProjector.linear_1 |
1 | 0 | 0 |
attr |
UltravoxFeedForwardProjector.linear_2 |
1 | 0 | 0 |
attr |
UltravoxFeedForwardProjector.act |
1 | 0 | 0 |
attr |
UltravoxFeedForwardProjector.ln_post |
1 | 0 | 0 |
vllm.model_executor.models.utils (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
collect_children |
3 | 2 | 0 |
func |
make_empty_intermediate_tensors_factory |
3 | 2 | 0 |
func |
no_init_weights |
4 | 3 | 0 |
meth |
AutoWeightsLoader._add_loadable_non_param_tensors |
3 | 2 | 0 |
attr |
AutoWeightsLoader.module |
1 | 0 | 0 |
attr |
AutoWeightsLoader.skip_prefixes |
1 | 0 | 0 |
attr |
AutoWeightsLoader.skip_substrs |
1 | 0 | 0 |
attr |
AutoWeightsLoader.ignore_unexpected_prefixes |
1 | 0 | 0 |
attr |
AutoWeightsLoader.ignore_unexpected_suffixes |
1 | 0 | 0 |
meth |
StageMissingLayer.__getattr__ |
2 | 1 | 0 |
meth |
StageMissingLayer.__call__ |
3 | 0 | 0 |
attr |
StageMissingLayer.stage_name |
1 | 0 | 0 |
meth |
PPMissingLayer.__init__ |
3 | 0 | 0 |
meth |
PPMissingLayer.forward |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.vision (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
VisionEncoderInfo.hf_config |
1 | 0 | 0 |
attr |
VisionEncoderInfo.vision_config |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
is_vit_use_data_parallel |
1 | 0 | 0 |
vllm.model_executor.models.voxtral (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
AudioLanguageAdapter.w_in |
1 | 0 | 0 |
attr |
AudioLanguageAdapter.gelu |
1 | 0 | 0 |
attr |
AudioLanguageAdapter.w_out |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.__init__ |
3 | 2 | 0 |
meth |
VoxtralForConditionalGeneration.embed_multimodal |
2 | 1 | 0 |
attr |
VoxtralForConditionalGeneration.tokenizer |
1 | 0 | 0 |
attr |
VoxtralForConditionalGeneration.config |
1 | 0 | 0 |
attr |
VoxtralForConditionalGeneration.downsample_factor |
1 | 0 | 0 |
attr |
VoxtralForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
VoxtralForConditionalGeneration.whisper_encoder |
1 | 0 | 0 |
attr |
VoxtralForConditionalGeneration.audio_language_adapter |
1 | 0 | 0 |
meth |
VoxtralProcessingInfo.get_data_parser |
1 | 0 | 0 |
attr |
VoxtralEncoderModel.config |
1 | 0 | 0 |
attr |
VoxtralEncoderModel.is_causal |
1 | 0 | 0 |
attr |
VoxtralEncoderModel.whisper_encoder |
1 | 0 | 0 |
attr |
VoxtralEncoderModel.mel_filters |
1 | 0 | 0 |
meth |
VoxtralProcessorAdapter.__call__ |
5 | 4 | 0 |
attr |
VoxtralProcessorAdapter.tokenizer |
1 | 0 | 0 |
vllm.model_executor.models.voxtral_realtime (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VoxtralRealtimeGeneration.__init__ |
3 | 2 | 0 |
meth |
VoxtralRealtimeGeneration.embed_multimodal |
2 | 1 | 0 |
prop |
VoxtralRealtimeGeneration.audio_config |
1 | 0 | 0 |
attr |
VoxtralRealtimeGeneration.n_delay_tokens |
1 | 0 | 0 |
attr |
TimeEmbedding.dim |
1 | 0 | 0 |
attr |
TimeEmbedding.theta |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.voyage (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VoyageQwen3BidirectionalEmbedModel.__init__ |
3 | 0 | 0 |
meth |
VoyageQwen3BidirectionalEmbedModel.forward |
3 | 0 | 0 |
attr |
VoyageQwen3BidirectionalEmbedModel.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
VoyageQwen3BidirectionalEmbedModel.linear |
1 | 0 | 0 |
vllm.model_executor.models.whisper (72 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
WhisperPositionalEmbedding.__init__ |
3 | 2 | 0 |
meth |
WhisperPositionalEmbedding.forward |
2 | 0 | 0 |
meth |
WhisperForConditionalGeneration.__init__ |
3 | 2 | 0 |
meth |
WhisperForConditionalGeneration.forward |
5 | 4 | 0 |
attr |
WhisperForConditionalGeneration.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
WhisperForConditionalGeneration.config |
1 | 0 | 0 |
attr |
WhisperForConditionalGeneration.dtype |
1 | 0 | 0 |
attr |
WhisperForConditionalGeneration.proj_out |
1 | 0 | 0 |
attr |
WhisperForConditionalGeneration.logits_processor |
1 | 0 | 0 |
attr |
WhisperForConditionalGeneration.model |
1 | 0 | 0 |
meth |
WhisperEncoder.__init__ |
4 | 3 | 0 |
attr |
WhisperEncoder.pos_embed_type |
1 | 0 | 0 |
attr |
WhisperEncoder.num_mel_bins |
1 | 0 | 0 |
attr |
WhisperEncoder.max_source_positions |
1 | 0 | 0 |
attr |
WhisperEncoder.embed_scale |
1 | 0 | 0 |
attr |
WhisperEncoder.conv1 |
1 | 0 | 0 |
attr |
WhisperEncoder.conv2 |
1 | 0 | 0 |
attr |
WhisperEncoder.total_stride |
1 | 0 | 0 |
attr |
WhisperEncoder.layer_norm |
1 | 0 | 0 |
attr |
WhisperEncoder.embed_positions |
1 | 0 | 0 |
meth |
WhisperMLP.__init__ |
6 | 5 | 0 |
meth |
WhisperMLP.forward |
2 | 1 | 0 |
attr |
WhisperMLP.activation_fn |
1 | 0 | 0 |
attr |
WhisperMLP.fc1 |
1 | 0 | 0 |
attr |
WhisperMLP.fc2 |
1 | 0 | 0 |
meth |
WhisperProcessingInfo.get_data_parser |
1 | 0 | 0 |
meth |
WhisperDecoder.__init__ |
3 | 2 | 0 |
meth |
WhisperDecoder.forward |
4 | 2 | 0 |
attr |
WhisperDecoder.layerdrop |
1 | 0 | 0 |
attr |
WhisperDecoder.padding_idx |
1 | 0 | 0 |
attr |
WhisperDecoder.max_target_positions |
1 | 0 | 0 |
attr |
WhisperDecoder.max_source_positions |
1 | 0 | 0 |
attr |
WhisperDecoder.embed_scale |
1 | 0 | 0 |
attr |
WhisperDecoder.embed_tokens |
1 | 0 | 0 |
attr |
WhisperDecoder.embed_positions |
1 | 0 | 0 |
attr |
WhisperDecoder.layer_norm |
1 | 0 | 0 |
meth |
WhisperEncoderLayer.__init__ |
3 | 2 | 0 |
meth |
WhisperEncoderLayer.forward |
2 | 1 | 0 |
attr |
WhisperEncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
WhisperEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
WhisperEncoderLayer.self_attn_layer_norm |
1 | 0 | 0 |
attr |
WhisperEncoderLayer.mlp |
1 | 0 | 0 |
attr |
WhisperEncoderLayer.final_layer_norm |
1 | 0 | 0 |
meth |
WhisperAttention.__init__ |
9 | 8 | 0 |
meth |
WhisperAttention.forward |
2 | 1 | 0 |
attr |
WhisperAttention.embed_dim |
1 | 0 | 0 |
attr |
WhisperAttention.total_num_heads |
1 | 0 | 0 |
attr |
WhisperAttention.num_heads |
1 | 0 | 0 |
attr |
WhisperAttention.num_kv_heads |
1 | 0 | 0 |
attr |
WhisperAttention.head_dim |
1 | 0 | 0 |
attr |
WhisperAttention.q_size |
1 | 0 | 0 |
attr |
WhisperAttention.kv_size |
1 | 0 | 0 |
attr |
WhisperAttention.attn_type |
1 | 0 | 0 |
attr |
WhisperAttention.scaling |
1 | 0 | 0 |
attr |
WhisperAttention.out_proj |
1 | 0 | 0 |
attr |
WhisperAttention.attn |
1 | 0 | 0 |
meth |
WhisperCrossAttention.__init__ |
7 | 6 | 0 |
meth |
WhisperCrossAttention.forward |
3 | 2 | 0 |
meth |
WhisperDecoderLayer.__init__ |
3 | 2 | 0 |
meth |
WhisperDecoderLayer.forward |
3 | 2 | 0 |
attr |
WhisperDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
WhisperDecoderLayer.self_attn_layer_norm |
1 | 0 | 0 |
attr |
WhisperDecoderLayer.encoder_attn |
1 | 0 | 0 |
attr |
WhisperDecoderLayer.encoder_attn_layer_norm |
1 | 0 | 0 |
attr |
WhisperDecoderLayer.mlp |
1 | 0 | 0 |
attr |
WhisperDecoderLayer.final_layer_norm |
1 | 0 | 0 |
meth |
WhisperModel.__init__ |
3 | 2 | 0 |
attr |
WhisperModel.encoder |
1 | 0 | 0 |
attr |
WhisperModel.decoder |
1 | 0 | 0 |
vllm.model_executor.models.whisper_causal (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
CausalRMSNorm |
1 | 0 | 0 |
meth |
WhisperCausalAttention.__init__ |
12 | 11 | 0 |
meth |
WhisperCausalAttention.forward |
3 | 2 | 0 |
attr |
WhisperCausalAttention.embed_dim |
1 | 0 | 0 |
attr |
WhisperCausalAttention.total_num_heads |
1 | 0 | 0 |
attr |
WhisperCausalAttention.num_heads |
1 | 0 | 0 |
attr |
WhisperCausalAttention.num_kv_heads |
1 | 0 | 0 |
attr |
WhisperCausalAttention.head_dim |
1 | 0 | 0 |
attr |
WhisperCausalAttention.q_size |
1 | 0 | 0 |
attr |
WhisperCausalAttention.kv_size |
1 | 0 | 0 |
attr |
WhisperCausalAttention.attn_type |
1 | 0 | 0 |
attr |
WhisperCausalAttention.scaling |
1 | 0 | 0 |
attr |
WhisperCausalAttention.out_proj |
1 | 0 | 0 |
attr |
WhisperCausalAttention.attn |
1 | 0 | 0 |
meth |
WhisperCausalEncoderLayer.__init__ |
3 | 2 | 0 |
meth |
WhisperCausalEncoderLayer.forward |
3 | 2 | 0 |
attr |
WhisperCausalEncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
WhisperCausalEncoderLayer.head_dim |
1 | 0 | 0 |
attr |
WhisperCausalEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
WhisperCausalEncoderLayer.self_attn_layer_norm |
1 | 0 | 0 |
attr |
WhisperCausalEncoderLayer.mlp |
1 | 0 | 0 |
attr |
WhisperCausalEncoderLayer.final_layer_norm |
1 | 0 | 0 |
meth |
WhisperCausalAttentionWithBlockPooling.__init__ |
16 | 15 | 0 |
meth |
WhisperCausalAttentionWithBlockPooling.get_kv_cache_spec |
2 | 1 | 0 |
attr |
WhisperCausalAttentionWithBlockPooling.block_pool_size |
1 | 0 | 0 |
meth |
WhisperCausalEncoder.__init__ |
3 | 2 | 0 |
attr |
WhisperCausalEncoder.num_mel_bins |
1 | 0 | 0 |
attr |
WhisperCausalEncoder.max_source_positions |
1 | 0 | 0 |
attr |
WhisperCausalEncoder.embed_scale |
1 | 0 | 0 |
attr |
WhisperCausalEncoder.conv1 |
1 | 0 | 0 |
attr |
WhisperCausalEncoder.conv2 |
1 | 0 | 0 |
attr |
WhisperCausalEncoder.total_stride |
1 | 0 | 0 |
attr |
WhisperCausalEncoder.layer_norm |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.models.zamba2 (51 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Zamba2MambaDecoderLayer.mamba |
1 | 0 | 0 |
attr |
Zamba2MambaDecoderLayer.input_layernorm |
1 | 0 | 0 |
meth |
Zamba2LoRA.__init__ |
6 | 5 | 0 |
meth |
Zamba2LoRA.forward |
2 | 1 | 0 |
attr |
Zamba2LoRA.A |
1 | 0 | 0 |
attr |
Zamba2LoRA.B |
1 | 0 | 0 |
attr |
Zamba2HybridLayer.block_idx |
1 | 0 | 0 |
attr |
Zamba2HybridLayer.shared_transformer |
1 | 0 | 0 |
attr |
Zamba2HybridLayer.linear |
1 | 0 | 0 |
attr |
Zamba2HybridLayer.mamba_decoder |
1 | 0 | 0 |
attr |
Zamba2AttentionDecoderLayer.self_attn |
1 | 0 | 0 |
attr |
Zamba2AttentionDecoderLayer.feed_forward |
1 | 0 | 0 |
attr |
Zamba2AttentionDecoderLayer.input_layernorm |
1 | 0 | 0 |
attr |
Zamba2AttentionDecoderLayer.pre_ff_layernorm |
1 | 0 | 0 |
attr |
Zamba2Attention.config |
1 | 0 | 0 |
attr |
Zamba2Attention.num_hybrid_layers |
1 | 0 | 0 |
attr |
Zamba2Attention.attention_hidden_size |
1 | 0 | 0 |
attr |
Zamba2Attention.total_num_attention_heads |
1 | 0 | 0 |
attr |
Zamba2Attention.num_attention_heads |
1 | 0 | 0 |
attr |
Zamba2Attention.attention_head_dim |
1 | 0 | 0 |
attr |
Zamba2Attention.qkv_size |
1 | 0 | 0 |
attr |
Zamba2Attention.scale |
1 | 0 | 0 |
attr |
Zamba2Attention.qkv_proj |
1 | 0 | 0 |
attr |
Zamba2Attention.o_proj |
1 | 0 | 0 |
attr |
Zamba2Attention.dpa_list |
1 | 0 | 0 |
attr |
Zamba2Attention.linear_q_adapter_list |
1 | 0 | 0 |
attr |
Zamba2Attention.linear_k_adapter_list |
1 | 0 | 0 |
attr |
Zamba2Attention.linear_v_adapter_list |
1 | 0 | 0 |
attr |
Zamba2Attention.rotary_emb |
1 | 0 | 0 |
attr |
Zamba2Model.config |
1 | 0 | 0 |
attr |
Zamba2Model.vocab_size |
1 | 0 | 0 |
attr |
Zamba2Model.embed_tokens |
1 | 0 | 0 |
attr |
Zamba2Model.layers |
1 | 0 | 0 |
attr |
Zamba2Model.final_layernorm |
1 | 0 | 0 |
meth |
Zamba2ForCausalLM.forward |
5 | 5 | 1 |
attr |
Zamba2ForCausalLM.hf_to_vllm_mapper |
1 | 0 | 0 |
attr |
Zamba2ForCausalLM.config |
1 | 0 | 0 |
attr |
Zamba2ForCausalLM.vllm_config |
1 | 0 | 0 |
attr |
Zamba2ForCausalLM.scheduler_config |
1 | 0 | 0 |
attr |
Zamba2ForCausalLM.model_config |
1 | 0 | 0 |
attr |
Zamba2ForCausalLM.model |
1 | 0 | 0 |
attr |
Zamba2ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Zamba2ForCausalLM.logits_processor |
1 | 0 | 0 |
attr |
Zamba2MLP.config |
1 | 0 | 0 |
attr |
Zamba2MLP.tp_size |
1 | 0 | 0 |
attr |
Zamba2MLP.num_hybrid_layers |
1 | 0 | 0 |
attr |
Zamba2MLP.hidden_size |
1 | 0 | 0 |
attr |
Zamba2MLP.intermediate_size |
1 | 0 | 0 |
attr |
Zamba2MLP.gate_up_proj |
1 | 0 | 0 |
attr |
Zamba2MLP.down_proj |
1 | 0 | 0 |
attr |
Zamba2MLP.act_fn |
1 | 0 | 0 |
attr |
Zamba2MLP.gate_up_proj_adapter_list |
1 | 0 | 0 |
vllm.model_executor.offloader.base (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseOffloader.post_init |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.offloader.prefetch (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PrefetchOffloader.__init__ |
6 | 5 | 0 |
meth |
PrefetchOffloader._hook_module_forward |
3 | 2 | 0 |
meth |
PrefetchOffloader._wait_for_layer |
2 | 1 | 0 |
meth |
PrefetchOffloader.sync_prev_onload |
1 | 0 | 0 |
meth |
PrefetchOffloader._start_prefetch |
2 | 1 | 0 |
meth |
PrefetchOffloader.join_after_forward |
1 | 0 | 0 |
meth |
PrefetchOffloader.post_init |
1 | 0 | 0 |
attr |
PrefetchOffloader.group_size |
1 | 0 | 0 |
attr |
PrefetchOffloader.num_in_group |
1 | 0 | 0 |
attr |
PrefetchOffloader.prefetch_step |
1 | 0 | 0 |
attr |
PrefetchOffloader.offload_params |
1 | 0 | 0 |
attr |
PrefetchOffloader.mode |
1 | 0 | 0 |
attr |
PrefetchOffloader.copy_stream |
1 | 0 | 0 |
attr |
PrefetchOffloader.total_offloaded_bytes |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
StaticBufferPool.__init__ |
4 | 3 | 0 |
attr |
StaticBufferPool.slot_capacity |
1 | 0 | 0 |
attr |
StaticBufferPool.total_bytes |
1 | 0 | 0 |
vllm.model_executor.offloader.uva (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UVAOffloader.__init__ |
3 | 2 | 0 |
attr |
UVAOffloader.cpu_offload_max_bytes |
1 | 0 | 0 |
attr |
UVAOffloader.cpu_offload_bytes |
1 | 0 | 0 |
attr |
UVAOffloader.cpu_offload_params |
1 | 0 | 0 |
attr |
UVAOffloader.pin_memory |
1 | 0 | 0 |
attr |
UVAOffloader.uva_offloading |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.model_executor.parameter (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PackedvLLMParameter.__init__ |
5 | 3 | 0 |
meth |
PackedvLLMParameter.adjust_shard_indexes_for_packing |
3 | 0 | 0 |
prop |
PackedvLLMParameter.packed_dim |
1 | 0 | 0 |
prop |
PackedvLLMParameter.packed_factor |
1 | 0 | 0 |
prop |
PackedvLLMParameter.marlin_tile_size |
1 | 0 | 0 |
meth |
BasevLLMParameter.__new__ |
3 | 1 | 0 |
meth |
BasevLLMParameter.__init__ |
3 | 2 | 0 |
meth |
BasevLLMParameter._is_1d_and_scalar |
2 | 1 | 0 |
meth |
BasevLLMParameter._assert_and_load |
2 | 1 | 0 |
meth |
BasevLLMParameter.load_column_parallel_weight |
2 | 1 | 0 |
meth |
BasevLLMParameter.load_row_parallel_weight |
2 | 1 | 0 |
meth |
BasevLLMParameter.load_merged_column_weight |
3 | 1 | 0 |
meth |
BasevLLMParameter.load_qkv_weight |
3 | 1 | 0 |
meth |
BasevLLMParameter.__torch_function__ |
5 | 0 | 0 |
attr |
BasevLLMParameter.tp_rank |
1 | 0 | 0 |
attr |
BasevLLMParameter.tp_size |
1 | 0 | 0 |
meth |
PerTensorScaleParameter.init |
2 | 0 | 0 |
meth |
PerTensorScaleParameter.load_row_parallel_weight |
3 | 0 | 0 |
meth |
PerTensorScaleParameter.load_merged_column_weight |
3 | 0 | 0 |
meth |
PerTensorScaleParameter.load_qkv_weight |
3 | 0 | 0 |
meth |
PerTensorScaleParameter.load_column_parallel_weight |
3 | 0 | 0 |
meth |
PerTensorScaleParameter._load_into_shard_id |
4 | 2 | 0 |
meth |
PackedColumnParameter.__init__ |
5 | 3 | 0 |
meth |
PackedColumnParameter.adjust_shard_indexes_for_packing |
3 | 0 | 0 |
prop |
PackedColumnParameter.packed_dim |
1 | 0 | 0 |
prop |
PackedColumnParameter.packed_factor |
1 | 0 | 0 |
prop |
PackedColumnParameter.marlin_tile_size |
1 | 0 | 0 |
meth |
RowvLLMParameter.__init__ |
3 | 1 | 0 |
meth |
RowvLLMParameter.load_row_parallel_weight |
2 | 1 | 0 |
prop |
RowvLLMParameter.input_dim |
1 | 0 | 0 |
vllm.model_executor.utils (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
set_weight_attrs |
3 | 2 | 0 |
func |
replace_parameter |
4 | 3 | 0 |
vllm.model_executor.warmup.deep_gemm_warmup (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
deepgemm_grouped_fp8_gemm_nt_contiguous_warmup |
4 | 3 | 0 |
func |
deepgemm_fp8_gemm_nt_warmup |
4 | 3 | 0 |
func |
deep_gemm_warmup |
3 | 2 | 0 |
vllm.model_executor.warmup.kernel_warmup (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
kernel_warmup |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.multimodal (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MULTIMODAL_REGISTRY |
1 | 0 | 0 |
vllm.multimodal.audio (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MONO_AUDIO_SPEC |
1 | 0 | 0 |
meth |
AudioResampler.init |
3 | 2 | 0 |
attr |
AudioResampler.target_sr |
1 | 0 | 0 |
attr |
AudioResampler.method |
1 | 0 | 0 |
attr |
scipy_signal |
1 | 0 | 0 |
attr |
PASSTHROUGH_AUDIO_SPEC |
1 | 0 | 0 |
func |
resample_audio_scipy |
4 | 3 | 0 |
attr |
librosa |
1 | 0 | 0 |
vllm.multimodal.cache (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MultiModalProcessorCacheItemMetadata.item_size |
1 | 0 | 0 |
attr |
MultiModalProcessorCacheItemMetadata.prompt_updates |
1 | 0 | 0 |
attr |
ShmObjectStoreSenderCache.world_size |
1 | 0 | 0 |
attr |
ShmObjectStoreReceiverCache.world_size |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
MultiModalProcessorCacheItem.item |
1 | 0 | 0 |
attr |
MultiModalProcessorCacheItem.prompt_updates |
1 | 0 | 0 |
vllm.multimodal.encoder_budget (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MultiModalBudget.model_config |
1 | 0 | 0 |
attr |
MultiModalBudget.scheduler_config |
1 | 0 | 0 |
attr |
MultiModalBudget.max_model_len |
1 | 0 | 0 |
attr |
MultiModalBudget.max_num_reqs |
1 | 0 | 0 |
attr |
MultiModalBudget.encoder_compute_budget |
1 | 0 | 0 |
attr |
MultiModalBudget.encoder_cache_size |
1 | 0 | 0 |
attr |
MultiModalBudget.mm_max_toks_per_item |
1 | 0 | 0 |
attr |
MultiModalBudget.cache |
1 | 0 | 0 |
attr |
MultiModalBudget.processor |
1 | 0 | 0 |
attr |
MultiModalBudget.mm_limits |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.multimodal.hasher (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.multimodal.image (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
convert_image_mode |
3 | 2 | 0 |
vllm.multimodal.inputs (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MultiModalKwargsItems.from_hf_inputs |
3 | 2 | 0 |
meth |
BaseMultiModalField._field_factory |
1 | 0 | 0 |
attr |
torch |
1 | 0 | 0 |
meth |
MultiModalFeatureSpec.gather_kwargs |
3 | 2 | 0 |
meth |
MultiModalFieldConfig.batched |
3 | 2 | 0 |
meth |
MultiModalFieldConfig.flat |
5 | 4 | 0 |
meth |
MultiModalFieldConfig.flat_from_sizes |
5 | 4 | 0 |
meth |
MultiModalFieldConfig.shared |
4 | 3 | 0 |
meth |
MultiModalKwargsItem.dummy |
2 | 1 | 0 |
vllm.multimodal.media.audio (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AudioMediaIO.init |
2 | 1 | 0 |
attr |
AudioMediaIO.kwargs |
1 | 0 | 0 |
attr |
soundfile |
1 | 0 | 0 |
attr |
librosa |
1 | 0 | 0 |
vllm.multimodal.media.base (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MediaWithBytes.array |
3 | 1 | 0 |
meth |
MediaWithBytes.getstate |
1 | 0 | 0 |
meth |
MediaWithBytes.setstate |
2 | 1 | 0 |
meth |
MediaWithBytes.getattr |
2 | 1 | 0 |
vllm.multimodal.media.connector (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MediaConnector.connection |
1 | 0 | 0 |
attr |
MediaConnector.allowed_local_media_path |
1 | 0 | 0 |
attr |
MediaConnector.allowed_media_domains |
1 | 0 | 0 |
attr |
MEDIA_CONNECTOR_REGISTRY |
1 | 0 | 0 |
attr |
global_thread_pool |
1 | 0 | 0 |
vllm.multimodal.media.image (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ImageMediaIO.init |
3 | 2 | 0 |
attr |
ImageMediaIO.image_mode |
1 | 0 | 0 |
attr |
ImageMediaIO.kwargs |
1 | 0 | 0 |
attr |
ImageMediaIO.rgba_background_color |
1 | 0 | 0 |
vllm.multimodal.media.video (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoMediaIO.init |
4 | 3 | 0 |
attr |
VideoMediaIO.image_io |
1 | 0 | 0 |
attr |
VideoMediaIO.num_frames |
1 | 0 | 0 |
attr |
VideoMediaIO.kwargs |
1 | 0 | 0 |
attr |
VideoMediaIO.video_loader |
1 | 0 | 0 |
vllm.multimodal.parse (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MultiModalDataParser.audio_resampler |
1 | 0 | 0 |
attr |
MultiModalDataParser.target_channels |
1 | 0 | 0 |
attr |
MultiModalDataParser.video_needs_metadata |
1 | 0 | 0 |
attr |
MultiModalDataParser.expected_hidden_size |
1 | 0 | 0 |
attr |
ModalityDataItems.modality |
1 | 0 | 0 |
attr |
VideoProcessorItems.metadata |
1 | 0 | 0 |
attr |
DictEmbeddingItems.fields_config |
1 | 0 | 0 |
attr |
DictEmbeddingItems.required_fields |
1 | 0 | 0 |
attr |
PILImage |
1 | 0 | 0 |
meth |
MultiModalDataItems.select |
2 | 1 | 0 |
vllm.multimodal.processing.context (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InputProcessingContext.get_mm_config |
1 | 0 | 0 |
meth |
InputProcessingContext.get_merged_mm_kwargs |
2 | 1 | 0 |
meth |
TimingContext.record |
2 | 1 | 0 |
meth |
TimingContext.get_stats_dict |
1 | 0 | 0 |
attr |
BaseProcessingInfo.ctx |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.multimodal.processing.dummy_inputs (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
BaseDummyInputsBuilder.info |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.multimodal.processing.processor (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EncDecMultiModalProcessor._get_enc_dec_inputs |
4 | 3 | 0 |
attr |
BaseMultiModalProcessor.info |
1 | 0 | 0 |
attr |
BaseMultiModalProcessor.dummy_inputs |
1 | 0 | 0 |
attr |
BaseMultiModalProcessor.cache |
1 | 0 | 0 |
attr |
BaseMultiModalProcessor.data_parser |
1 | 0 | 0 |
meth |
ResolvedPromptUpdate.with_target |
2 | 1 | 0 |
meth |
ResolvedPromptUpdate.with_content |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.multimodal.registry (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MultiModalRegistry.register_processor |
4 | 3 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.multimodal.utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
torch |
1 | 0 | 0 |
vllm.multimodal.video (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
VIDEO_LOADER_REGISTRY |
1 | 0 | 0 |
meth |
OpenCVVideoBackend.get_cv2_video_api |
1 | 0 | 0 |
meth |
OpenCVVideoBackend.load_bytes |
7 | 6 | 0 |
meth |
OpenCVDynamicOpenPanguVideoBackend.load_bytes |
7 | 6 | 0 |
meth |
VideoLoader.load_bytes |
4 | 3 | 0 |
meth |
VideoLoader._read_frames |
5 | 4 | 0 |
meth |
OpenCVDynamicVideoBackend.load_bytes |
7 | 6 | 0 |
meth |
Molmo2VideoBackend.get_cv2_video_api |
1 | 0 | 0 |
meth |
Molmo2VideoBackend.sample_times |
7 | 6 | 0 |
meth |
Molmo2VideoBackend.load_bytes_opencv |
7 | 6 | 0 |
meth |
Molmo2VideoBackend.load_bytes |
4 | 3 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.outputs (27 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ClassificationRequestOutput.from_base |
2 | 1 | 0 |
meth |
EmbeddingRequestOutput.from_base |
2 | 1 | 0 |
meth |
PoolingRequestOutput.init |
6 | 5 | 0 |
meth |
PoolingRequestOutput.repr |
1 | 0 | 0 |
attr |
PoolingRequestOutput.request_id |
1 | 0 | 0 |
attr |
PoolingRequestOutput.prompt_token_ids |
1 | 0 | 0 |
attr |
PoolingRequestOutput.num_cached_tokens |
1 | 0 | 0 |
attr |
PoolingRequestOutput.finished |
1 | 0 | 0 |
attr |
PoolingRequestOutput.outputs |
1 | 0 | 0 |
meth |
EmbeddingOutput.from_base |
2 | 1 | 0 |
meth |
ClassificationOutput.from_base |
2 | 1 | 0 |
meth |
ScoringOutput.from_base |
2 | 1 | 0 |
meth |
ScoringRequestOutput.from_base |
2 | 1 | 0 |
meth |
RequestOutput.init |
14 | 14 | 1 |
attr |
RequestOutput.request_id |
1 | 0 | 0 |
attr |
RequestOutput.prompt |
1 | 0 | 0 |
attr |
RequestOutput.prompt_token_ids |
1 | 0 | 0 |
attr |
RequestOutput.prompt_logprobs |
1 | 0 | 0 |
attr |
RequestOutput.outputs |
1 | 0 | 0 |
attr |
RequestOutput.finished |
1 | 0 | 0 |
attr |
RequestOutput.metrics |
1 | 0 | 0 |
attr |
RequestOutput.lora_request |
1 | 0 | 0 |
attr |
RequestOutput.encoder_prompt |
1 | 0 | 0 |
attr |
RequestOutput.encoder_prompt_token_ids |
1 | 0 | 0 |
attr |
RequestOutput.num_cached_tokens |
1 | 0 | 0 |
attr |
RequestOutput.kv_transfer_params |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
STREAM_FINISHED |
1 | 0 | 0 |
vllm.parser.abstract_parser (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
_WrappedParser.init |
2 | 1 | 0 |
meth |
Parser.init |
4 | 1 | 0 |
attr |
Parser.model_tokenizer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.parser.minimax_m2_parser (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniMaxM2Parser.init |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.parser.parser_manager (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.platforms.cpu (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_max_threads |
2 | 0 | 0 |
meth |
CpuPlatform.inference_mode |
1 | 0 | 0 |
meth |
LogicalCPUInfo.json_decoder |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.platforms.cuda (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CudaPlatformBase.log_warnings |
1 | 0 | 0 |
meth |
CudaPlatformBase.check_if_supports_dtype |
2 | 1 | 0 |
meth |
CudaPlatformBase.num_compute_units |
2 | 0 | 0 |
attr |
pynvml |
1 | 0 | 0 |
meth |
NvmlCudaPlatform.log_warnings |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.platforms.interface (9 missing, 6 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Platform.device_id_to_physical_device_id |
2 | 1 | 0 |
meth |
Platform.inference_mode |
1 | 0 | 0 |
meth |
Platform.getattr |
2 | 1 | 0 |
meth |
Platform.get_global_graph_pool |
1 | 1 | 1 |
meth |
Platform.check_if_supports_dtype |
2 | 1 | 0 |
meth |
Platform.make_synced_weight_loader |
2 | 0 | 0 |
meth |
Platform.set_additional_forward_context |
3 | 1 | 0 |
meth |
DeviceCapability.lt |
2 | 2 | 1 |
meth |
DeviceCapability.le |
2 | 2 | 1 |
meth |
DeviceCapability.eq |
2 | 2 | 1 |
meth |
DeviceCapability.ge |
2 | 2 | 1 |
meth |
DeviceCapability.gt |
2 | 2 | 1 |
attr |
logger |
1 | 0 | 0 |
vllm.platforms.rocm (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
with_amdsmi_context |
2 | 0 | 0 |
meth |
RocmPlatform.check_if_supports_dtype |
2 | 1 | 0 |
meth |
RocmPlatform.num_compute_units |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.platforms.tpu (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.platforms.xpu (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
XPUPlatform.inference_mode |
1 | 0 | 0 |
meth |
XPUPlatform.is_pin_memory_available |
1 | 0 | 0 |
meth |
XPUPlatform.check_if_supports_dtype |
2 | 1 | 0 |
vllm.plugins (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
load_general_plugins |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.plugins.io_processors (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.plugins.io_processors.interface (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IOProcessor.init |
3 | 2 | 0 |
meth |
IOProcessor.pre_process |
4 | 3 | 0 |
meth |
IOProcessor.pre_process_async |
4 | 3 | 0 |
meth |
IOProcessor.post_process |
4 | 3 | 0 |
meth |
IOProcessor.post_process_async |
4 | 3 | 0 |
attr |
IOProcessor.vllm_config |
1 | 0 | 0 |
vllm.plugins.lora_resolvers.filesystem_resolver (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FilesystemResolver.init |
2 | 1 | 0 |
attr |
FilesystemResolver.lora_cache_dir |
1 | 0 | 0 |
func |
register_filesystem_resolver |
1 | 0 | 0 |
vllm.plugins.lora_resolvers.hf_hub_resolver (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
register_hf_hub_resolver |
1 | 0 | 0 |
meth |
HfHubResolver.init |
2 | 1 | 0 |
vllm.pooling_params (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PoolingParams._verify_step_pooling |
3 | 2 | 0 |
meth |
PoolingParams._set_default_parameters |
2 | 1 | 0 |
meth |
PoolingParams._verify_valid_parameters |
1 | 0 | 0 |
prop |
PoolingParams.valid_parameters |
1 | 0 | 0 |
vllm.profiler.layerwise_profile (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayerwiseProfileResults.post_init |
1 | 0 | 0 |
meth |
LayerwiseProfileResults.print_model_table |
2 | 1 | 0 |
meth |
LayerwiseProfileResults.print_summary_table |
2 | 1 | 0 |
meth |
LayerwiseProfileResults.export_model_stats_table_csv |
2 | 1 | 0 |
meth |
LayerwiseProfileResults.export_summary_stats_table_csv |
2 | 1 | 0 |
meth |
LayerwiseProfileResults._indent_row_names_based_on_depth |
3 | 2 | 0 |
meth |
LayerwiseProfileResults._build_correlation_map |
1 | 0 | 0 |
meth |
LayerwiseProfileResults._build_module_tree |
1 | 0 | 0 |
meth |
LayerwiseProfileResults._get_kineto_gpu_event |
2 | 1 | 0 |
meth |
LayerwiseProfileResults._cumulative_cuda_time |
2 | 1 | 0 |
meth |
LayerwiseProfileResults._total_cuda_time |
1 | 0 | 0 |
meth |
LayerwiseProfileResults._build_stats_trees |
1 | 0 | 0 |
attr |
pd |
1 | 0 | 0 |
meth |
layerwise_profile.init |
2 | 1 | 0 |
meth |
layerwise_profile.enter |
1 | 0 | 0 |
meth |
layerwise_profile.exit |
4 | 0 | 0 |
attr |
layerwise_profile.num_running_seqs |
1 | 0 | 0 |
vllm.profiler.utils (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TablePrinter.init |
3 | 2 | 0 |
meth |
TablePrinter.print_table |
2 | 1 | 0 |
meth |
TablePrinter._print_header |
1 | 0 | 0 |
meth |
TablePrinter._print_row |
2 | 0 | 0 |
meth |
TablePrinter._print_line |
1 | 0 | 0 |
attr |
TablePrinter.row_cls |
1 | 0 | 0 |
attr |
TablePrinter.fieldnames |
1 | 0 | 0 |
attr |
TablePrinter.column_widths |
1 | 0 | 0 |
func |
event_arg_repr |
2 | 1 | 0 |
vllm.profiler.wrapper (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CudaProfilerWrapper.annotate_context_manager |
2 | 1 | 0 |
meth |
WorkerProfiler.annotate_context_manager |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
TorchProfilerWrapper.annotate_context_manager |
2 | 1 | 0 |
attr |
TorchProfilerWrapper.local_rank |
1 | 0 | 0 |
attr |
TorchProfilerWrapper.profiler_config |
1 | 0 | 0 |
attr |
TorchProfilerWrapper.dump_cpu_time_total |
1 | 0 | 0 |
attr |
TorchProfilerWrapper.profiler |
1 | 0 | 0 |
vllm.ray.lazy_utils (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
is_ray_initialized |
1 | 0 | 0 |
func |
is_in_ray_actor |
1 | 0 | 0 |
vllm.ray.ray_env (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
RAY_NON_CARRY_OVER_ENV_VARS |
1 | 0 | 0 |
attr |
RAY_NON_CARRY_OVER_ENV_VARS_FILE |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.reasoning.abs_reasoning_parsers (5 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ReasoningParser.init |
4 | 1 | 1 |
attr |
ReasoningParser.model_tokenizer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.reasoning.basic_parsers (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseThinkingReasoningParser.init |
4 | 1 | 0 |
attr |
BaseThinkingReasoningParser.start_token_id |
1 | 0 | 0 |
attr |
BaseThinkingReasoningParser.end_token_id |
1 | 0 | 0 |
vllm.reasoning.deepseek_v3_reasoning_parser (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
DeepSeekV3ReasoningWithThinkingParser.init |
4 | 1 | 0 |
meth |
DeepSeekV3ReasoningParser.init |
4 | 1 | 0 |
vllm.reasoning.ernie45_reasoning_parser (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie45ReasoningParser.init |
4 | 1 | 0 |
attr |
Ernie45ReasoningParser.start_token_id |
1 | 0 | 0 |
attr |
Ernie45ReasoningParser.end_token_id |
1 | 0 | 0 |
attr |
Ernie45ReasoningParser.response_start_token_id |
1 | 0 | 0 |
attr |
Ernie45ReasoningParser.response_end_token_id |
1 | 0 | 0 |
attr |
Ernie45ReasoningParser.newline_token_id |
1 | 0 | 0 |
attr |
Ernie45ReasoningParser.parser_token_ids |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.reasoning.gptoss_reasoning_parser (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
tag_with_builtin_funcs |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
GptOssReasoningParser.init |
4 | 1 | 0 |
attr |
GptOssReasoningParser.reasoning_end_token_ids_prefix |
1 | 0 | 0 |
attr |
GptOssReasoningParser.reasoning_end_token_ids_suffix |
1 | 0 | 0 |
attr |
GptOssReasoningParser.eom_token_id |
1 | 0 | 0 |
attr |
GptOssReasoningParser.reasoning_max_num_between_tokens |
1 | 0 | 0 |
vllm.reasoning.granite_reasoning_parser (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteReasoningParser.init |
4 | 1 | 0 |
attr |
GraniteReasoningParser.think_start_expr |
1 | 0 | 0 |
attr |
GraniteReasoningParser.response_start_expr |
1 | 0 | 0 |
attr |
GraniteReasoningParser.reasoning_regex |
1 | 0 | 0 |
attr |
GraniteReasoningParser.valid_think_starts |
1 | 0 | 0 |
attr |
GraniteReasoningParser.valid_response_starts |
1 | 0 | 0 |
attr |
GraniteReasoningParser.seq_boundary_end |
1 | 0 | 0 |
attr |
GraniteReasoningParser.seq_boundary_start |
1 | 0 | 0 |
attr |
GraniteReasoningParser.longest_think_start |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.reasoning.hunyuan_a13b_reasoning_parser (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
HunyuanA13BReasoningParser.init |
4 | 1 | 0 |
attr |
HunyuanA13BReasoningParser.think_start_expr |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.think_end_expr |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.response_start_expr |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.response_end_expr |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.full_match_reasoning_regex |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.half_match_reasoning_regex |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.think_start_ids |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.think_start_ids_fast |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.response_start_ids |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.response_start_ids_fast |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.response_end_ids |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.fast_think_ids |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.buffered_text |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.buffered_ids |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.current_state |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.all_states |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.expected_sequence |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.expected_sequence_side |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.sequence_index |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.token_buffer |
1 | 0 | 0 |
attr |
HunyuanA13BReasoningParser.text_buffer |
1 | 0 | 0 |
vllm.reasoning.identity_reasoning_parser (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
IdentityReasoningParser.init |
4 | 1 | 0 |
vllm.reasoning.kimi_k2_reasoning_parser (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KimiK2ReasoningParser.init |
4 | 1 | 0 |
vllm.reasoning.minimax_m2_reasoning_parser (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniMaxM2AppendThinkReasoningParser.init |
4 | 1 | 0 |
attr |
MiniMaxM2AppendThinkReasoningParser.end_token_id |
1 | 0 | 0 |
attr |
MiniMaxM2AppendThinkReasoningParser.start_token_id |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.reasoning.mistral_reasoning_parser (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MistralReasoningParser.init |
4 | 1 | 0 |
attr |
MistralReasoningParser.start_token_id |
1 | 0 | 0 |
attr |
MistralReasoningParser.end_token_id |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.reasoning.olmo3_reasoning_parser (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Indices.len |
1 | 0 | 0 |
meth |
Olmo3ReasoningBuffer.len |
1 | 0 | 0 |
meth |
Olmo3ReasoningParser.init |
4 | 1 | 0 |
attr |
Olmo3ReasoningParser.think_start |
1 | 0 | 0 |
attr |
Olmo3ReasoningParser.think_end |
1 | 0 | 0 |
attr |
Olmo3ReasoningParser.reasoning_regex |
1 | 0 | 0 |
attr |
Olmo3ReasoningParser.buffer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.reasoning.qwen3_reasoning_parser (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3ReasoningParser.init |
4 | 1 | 0 |
attr |
Qwen3ReasoningParser.thinking_enabled |
1 | 0 | 0 |
vllm.reasoning.step3_reasoning_parser (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Step3ReasoningParser.init |
4 | 1 | 0 |
attr |
Step3ReasoningParser.think_end_token |
1 | 0 | 0 |
attr |
Step3ReasoningParser.reasoning_regex |
1 | 0 | 0 |
attr |
Step3ReasoningParser.think_end_token_id |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.reasoning.step3p5_reasoning_parser (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Step3p5ReasoningParser.init |
4 | 1 | 0 |
vllm.renderers.base (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseRenderer._apply_prompt_extras |
3 | 2 | 0 |
meth |
BaseRenderer._process_mm_uuids |
5 | 4 | 0 |
meth |
BaseRenderer.render_cmpl |
4 | 3 | 0 |
meth |
BaseRenderer.render_cmpl_async |
4 | 3 | 0 |
meth |
BaseRenderer.render_chat |
5 | 4 | 0 |
meth |
BaseRenderer.render_chat_async |
5 | 4 | 0 |
attr |
BaseRenderer.config |
1 | 0 | 0 |
attr |
BaseRenderer.model_config |
1 | 0 | 0 |
attr |
BaseRenderer.tokenizer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.renderers.deepseek_v32 (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.renderers.grok2 (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.renderers.hf (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
HfRenderer.use_unified_vision_chunk |
1 | 0 | 0 |
func |
safe_apply_chat_template |
8 | 7 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.renderers.inputs.preprocess (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
parse_model_prompt |
3 | 2 | 0 |
func |
extract_target_prompt |
3 | 2 | 0 |
func |
extract_prompt_len |
3 | 2 | 0 |
vllm.renderers.mistral (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
safe_apply_chat_template |
4 | 3 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.renderers.params (4 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChatParams.with_defaults |
2 | 1 | 0 |
meth |
TokenizeParams.with_kwargs |
2 | 1 | 1 |
attr |
torch |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.renderers.registry (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
renderer_from_config |
3 | 1 | 0 |
attr |
RENDERER_REGISTRY |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.renderers.terratorch (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.sampling_params (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
StructuredOutputsParams.post_init |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
RepetitionDetectionParams.post_init |
1 | 0 | 0 |
vllm.scalar_type (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ScalarType.from_id |
2 | 1 | 0 |
attr |
scalar_types.int4 |
1 | 0 | 0 |
attr |
scalar_types.uint4 |
1 | 0 | 0 |
attr |
scalar_types.int8 |
1 | 0 | 0 |
attr |
scalar_types.uint8 |
1 | 0 | 0 |
attr |
scalar_types.float8_e4m3fn |
1 | 0 | 0 |
attr |
scalar_types.float8_e5m2 |
1 | 0 | 0 |
attr |
scalar_types.float8_e8m0fnu |
1 | 0 | 0 |
attr |
scalar_types.float16_e8m7 |
1 | 0 | 0 |
attr |
scalar_types.float16_e5m10 |
1 | 0 | 0 |
attr |
scalar_types.float6_e3m2f |
1 | 0 | 0 |
attr |
scalar_types.float6_e2m3f |
1 | 0 | 0 |
attr |
scalar_types.float4_e2m1f |
1 | 0 | 0 |
attr |
scalar_types.uint2b2 |
1 | 0 | 0 |
attr |
scalar_types.uint3b4 |
1 | 0 | 0 |
attr |
scalar_types.uint4b8 |
1 | 0 | 0 |
attr |
scalar_types.uint8b128 |
1 | 0 | 0 |
vllm.scripts (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
main |
1 | 0 | 0 |
vllm.sequence (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IntermediateTensors.getitem |
2 | 1 | 0 |
meth |
IntermediateTensors.setitem |
3 | 2 | 0 |
meth |
IntermediateTensors.items |
1 | 0 | 0 |
meth |
IntermediateTensors.len |
1 | 0 | 0 |
meth |
IntermediateTensors.eq |
2 | 1 | 0 |
vllm.third_party.flashmla.flash_mla_interface (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_mla_metadata |
3 | 1 | 0 |
meth |
FlashAttnVarlenFunc.backward |
3 | 2 | 0 |
vllm.third_party.pynvml (980 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
nvmlDeviceGetGspFirmwareMode |
4 | 0 | 0 |
attr |
NVML_VALUE_NOT_AVAILABLE_uint |
1 | 0 | 0 |
func |
nvmlDeviceSetVirtualizationMode |
3 | 0 | 0 |
func |
nvmlDeviceGetVgpuProcessUtilization |
3 | 0 | 0 |
func |
nvmlDeviceGetFBCSessions |
2 | 0 | 0 |
func |
nvmlDeviceIsMigDeviceHandle |
2 | 0 | 0 |
func |
nvmlDeviceGetPowerState |
2 | 0 | 0 |
attr |
c_nvmlVgpuPlacementList_v2_t.fields |
1 | 0 | 0 |
func |
nvmlDeviceGetTotalEccErrors |
4 | 0 | 0 |
func |
nvmlDeviceGetHandleBySerial |
2 | 0 | 0 |
func |
nvmlDeviceGetVgpuTypeCreatablePlacements |
4 | 0 | 0 |
func |
nvmlVgpuInstanceClearAccountingPids |
2 | 0 | 0 |
func |
nvmlDeviceGetAutoBoostedClocksEnabled |
2 | 0 | 0 |
func |
nvmlDeviceGetVgpuMetadata |
2 | 0 | 0 |
func |
nvmlDeviceGetPowerUsage |
2 | 0 | 0 |
func |
nvmlDeviceGetFanSpeed |
2 | 0 | 0 |
func |
nvmlDeviceGetSupportedMemoryClocks |
2 | 0 | 0 |
func |
nvmlUnitGetUnitInfo |
2 | 0 | 0 |
func |
nvmlDeviceSetMemClkVfOffset |
3 | 0 | 0 |
func |
nvmlDeviceGetPendingDramEncryptionMode |
2 | 0 | 0 |
func |
nvmlDeviceGetTemperature |
3 | 0 | 0 |
func |
nvmlDeviceSetDefaultFanSpeed_v2 |
3 | 0 | 0 |
func |
nvmlDeviceGetInforomVersion |
3 | 0 | 0 |
func |
nvmlDeviceGetPerformanceState |
2 | 0 | 0 |
func |
nvmlUnitGetTemperature |
3 | 0 | 0 |
func |
nvmlShutdown |
1 | 0 | 0 |
func |
nvmlDeviceGetCurrPcieLinkGeneration |
2 | 0 | 0 |
func |
nvmlDeviceGetMPSComputeRunningProcesses |
2 | 0 | 0 |
func |
nvmlDeviceGetCapabilities |
3 | 0 | 0 |
func |
nvmlDeviceGetNvLinkState |
3 | 0 | 0 |
attr |
NVML_VALUE_NOT_AVAILABLE_ulonglong |
1 | 0 | 0 |
func |
nvmlDeviceGetGpuInstancePossiblePlacements |
5 | 0 | 0 |
func |
nvmlGpmSampleGet |
3 | 0 | 0 |
func |
nvmlVgpuInstanceGetEccMode |
2 | 0 | 0 |
attr |
c_nvmlGpmSample_t |
1 | 0 | 0 |
func |
nvmlDeviceSetDramEncryptionMode |
3 | 0 | 0 |
func |
nvmlDeviceGetBAR1MemoryInfo |
2 | 0 | 0 |
func |
nvmlDeviceGetArchitecture |
2 | 0 | 0 |
func |
nvmlDeviceSetDefaultAutoBoostedClocksEnabled |
4 | 0 | 0 |
func |
nvmlDeviceResetNvLinkErrorCounters |
3 | 0 | 0 |
func |
nvmlDeviceGetNvLinkUtilizationControl |
4 | 0 | 0 |
func |
nvmlDeviceGetNvLinkUtilizationCounter |
4 | 0 | 0 |
func |
nvmlDeviceGetClock |
4 | 0 | 0 |
func |
nvmlDeviceGetMaxPcieLinkWidth |
2 | 0 | 0 |
func |
nvmlUnitGetDevices |
2 | 0 | 0 |
func |
nvmlDeviceQueryDrainState |
2 | 0 | 0 |
attr |
c_nvmlVgpuInstancesUtilizationInfo_v1_t.fields |
1 | 0 | 0 |
func |
nvmlDeviceGetMultiGpuBoard |
2 | 0 | 0 |
func |
nvmlDeviceSetVgpuSchedulerState |
3 | 0 | 0 |
func |
nvmlDeviceWorkloadPowerProfileSetRequestedProfiles |
3 | 0 | 0 |
func |
nvmlDeviceGetMigDeviceHandleByIndex |
3 | 0 | 0 |
func |
nvmlDeviceGetRemappedRows |
2 | 0 | 0 |
func |
nvmlDeviceGetInforomImageVersion |
2 | 0 | 0 |
func |
nvmlVgpuInstanceGetUUID |
2 | 0 | 0 |
func |
nvmlDeviceGetComputeRunningProcesses |
2 | 0 | 0 |
func |
nvmlGpmQueryDeviceSupport |
2 | 0 | 0 |
meth |
c_nvmlWorkloadPowerProfileCurrentProfiles_v1_t.init |
1 | 0 | 0 |
func |
nvmlDeviceGetNvLinkCapability |
4 | 0 | 0 |
func |
nvmlDeviceGetCoolerInfo |
2 | 0 | 0 |
func |
nvmlUnitGetCount |
1 | 0 | 0 |
func |
nvmlDevicePowerSmoothingUpdatePresetProfileParam |
3 | 0 | 0 |
meth |
c_nvmlNvlinkSupportedBwModes_v1_t.init |
1 | 0 | 0 |
func |
nvmlGpmMetricsGet |
2 | 0 | 0 |
attr |
c_nvmlProcessDetailList_v1_t.fields |
1 | 0 | 0 |
func |
nvmlDeviceGetMPSComputeRunningProcesses_v3 |
2 | 0 | 0 |
func |
nvmlDeviceWorkloadPowerProfileGetProfilesInfo |
3 | 0 | 0 |
func |
nvmlDeviceSetPersistenceMode |
3 | 0 | 0 |
func |
nvmlDeviceGetRunningProcessDetailList |
4 | 0 | 0 |
func |
nvmlVgpuInstanceGetFBCStats |
2 | 0 | 0 |
func |
nvmlUnitGetFanSpeedInfo |
2 | 0 | 0 |
func |
nvmlEventSetWait_v2 |
3 | 0 | 0 |
func |
nvmlVgpuInstanceGetVmDriverVersion |
2 | 0 | 0 |
func |
nvmlDeviceGetTargetFanSpeed |
3 | 0 | 0 |
func |
nvmlDeviceGetGpuFabricInfo |
3 | 0 | 0 |
func |
nvmlSystemGetCudaDriverVersion_v2 |
1 | 0 | 0 |
func |
nvmlVgpuTypeGetMaxInstancesPerVm |
2 | 0 | 0 |
func |
nvmlDeviceGetAccountingMode |
2 | 0 | 0 |
func |
nvmlSystemGetNVMLVersion |
1 | 0 | 0 |
func |
nvmlDeviceSetApplicationsClocks |
4 | 0 | 0 |
func |
nvmlDeviceGetNvLinkRemoteDeviceType |
3 | 0 | 0 |
func |
nvmlDeviceGetSupportedPerformanceStates |
2 | 0 | 0 |
func |
nvmlDeviceGetPcieThroughput |
3 | 0 | 0 |
func |
nvmlGetVgpuVersion |
3 | 0 | 0 |
func |
nvmlDeviceGetDeviceHandleFromMigDeviceHandle |
2 | 0 | 0 |
func |
nvmlDeviceOnSameBoard |
3 | 0 | 0 |
func |
nvmlDeviceGetSerial |
2 | 0 | 0 |
func |
nvmlDeviceGetUUID |
2 | 0 | 0 |
func |
nvmlDeviceGetFanSpeed_v2 |
3 | 0 | 0 |
func |
nvmlGetExcludedDeviceInfoByIndex |
2 | 0 | 0 |
func |
nvmlDeviceGetAccountingPids |
2 | 0 | 0 |
func |
nvmlDeviceGetClkMonStatus |
3 | 0 | 0 |
func |
nvmlDeviceGetVgpuUtilization |
3 | 0 | 0 |
func |
nvmlDeviceGetPlatformInfo |
3 | 0 | 0 |
attr |
libLoadLock |
1 | 0 | 0 |
func |
nvmlVgpuInstanceGetFBCSessions |
2 | 0 | 0 |
func |
nvmlSystemSetNvlinkBwMode |
2 | 0 | 0 |
func |
nvmlDeviceGetPowerSource |
2 | 0 | 0 |
func |
nvmlVgpuTypeGetGpuInstanceProfileId |
2 | 0 | 0 |
func |
nvmlDeviceClearAccountingPids |
2 | 0 | 0 |
func |
nvmlDeviceGetGridLicensableFeatures_v4 |
2 | 0 | 0 |
func |
nvmlDeviceSetGpuOperationMode |
3 | 0 | 0 |
func |
nvmlDeviceGetSupportedClocksThrottleReasons |
2 | 0 | 0 |
func |
nvmlDeviceGetMemClkMinMaxVfOffset |
4 | 0 | 0 |
func |
nvmlSystemGetTopologyGpuSet |
2 | 0 | 0 |
func |
nvmlSystemGetCudaDriverVersion |
1 | 0 | 0 |
func |
nvmlDeviceGetAPIRestriction |
3 | 0 | 0 |
func |
nvmlDeviceClearEccErrorCounts |
3 | 0 | 0 |
func |
nvmlDeviceGetProcessUtilization |
3 | 0 | 0 |
meth |
c_nvmlSystemConfComputeSettings_v1_t.init |
1 | 0 | 0 |
func |
nvmlVgpuInstanceSetEncoderCapacity |
3 | 0 | 0 |
func |
nvmlDeviceGetCurrentDramEncryptionMode |
2 | 0 | 0 |
func |
nvmlGpmQueryIfStreamingEnabled |
2 | 0 | 0 |
func |
nvmlDeviceGetSupportedClocksEventReasons |
2 | 0 | 0 |
func |
nvmlDeviceGetP2PStatus |
4 | 0 | 0 |
func |
nvmlDeviceGetEnforcedPowerLimit |
2 | 0 | 0 |
func |
nvmlDeviceValidateInforom |
2 | 0 | 0 |
func |
nvmlGpmSetStreamingEnabled |
3 | 0 | 0 |
attr |
c_nvmlVgpuProcessesUtilizationInfo_v1_t.fields |
1 | 0 | 0 |
func |
nvmlDeviceGetGpcClkVfOffset |
2 | 0 | 0 |
func |
nvmlDeviceRemoveGpu |
2 | 0 | 0 |
func |
nvmlDeviceGetRetiredPagesPendingStatus |
2 | 0 | 0 |
func |
nvmlSystemGetConfComputeSettings |
2 | 0 | 0 |
func |
convertStrBytes |
2 | 0 | 0 |
func |
nvmlComputeInstanceDestroy |
2 | 0 | 0 |
func |
nvmlDeviceGetMemClkVfOffset |
2 | 0 | 0 |
func |
nvmlDeviceGetViolationStatus |
3 | 0 | 0 |
func |
nvmlDeviceGetPendingGpuOperationMode |
2 | 0 | 0 |
func |
nvmlVgpuInstanceGetLicenseStatus |
2 | 0 | 0 |
func |
nvmlDevicePowerSmoothingSetState |
3 | 0 | 0 |
func |
nvmlVgpuInstanceGetLicenseInfo |
2 | 0 | 0 |
func |
nvmlVgpuTypeGetCapabilities |
3 | 0 | 0 |
attr |
c_nvmlProcessesUtilizationInfo_v1_t.fields |
1 | 0 | 0 |
func |
nvmlDeviceSetAutoBoostedClocksEnabled |
3 | 0 | 0 |
func |
nvmlDeviceGetBoardId |
2 | 0 | 0 |
func |
nvmlDeviceGetPendingEccMode |
2 | 0 | 0 |
func |
nvmlGpuInstanceGetComputeInstances |
5 | 0 | 0 |
meth |
c_nvmlPowerSmoothingState_v1_t.init |
1 | 0 | 0 |
func |
nvmlDeviceGetGpuInstanceById |
3 | 0 | 0 |
func |
nvmlSystemGetNvlinkBwMode |
1 | 0 | 0 |
meth |
c_nvmlDeviceCapabilities_v1_t.init |
1 | 0 | 0 |
func |
nvmlDeviceGetCreatableVgpus |
2 | 0 | 0 |
func |
nvmlDeviceGetMPSComputeRunningProcesses_v2 |
2 | 0 | 0 |
func |
nvmlDeviceGetVgpuTypeSupportedPlacements |
5 | 0 | 0 |
func |
nvmlSystemGetHicVersion |
1 | 0 | 0 |
func |
nvmlDeviceGetMemoryErrorCounter |
5 | 0 | 0 |
func |
nvmlVgpuInstanceGetVmID |
2 | 0 | 0 |
func |
nvmlDeviceGetComputeInstanceId |
2 | 0 | 0 |
func |
nvmlDeviceGetClockInfo |
3 | 0 | 0 |
meth |
c_nvmlWorkloadPowerProfileInfo_v1_t.init |
1 | 0 | 0 |
func |
nvmlDeviceSetAPIRestriction |
4 | 0 | 0 |
func |
nvmlSystemGetConfComputeCapabilities |
1 | 0 | 0 |
func |
nvmlDeviceGetInforomConfigurationChecksum |
2 | 0 | 0 |
func |
nvmlDeviceGetGpcClkMinMaxVfOffset |
4 | 0 | 0 |
func |
nvmlDeviceClearCpuAffinity |
2 | 0 | 0 |
func |
nvmlDeviceGetCurrPcieLinkWidth |
2 | 0 | 0 |
func |
nvmlDeviceSetMemoryLockedClocks |
4 | 0 | 0 |
func |
nvmlVgpuInstanceGetMetadata |
2 | 0 | 0 |
func |
nvmlDeviceGetFieldValues |
3 | 0 | 0 |
func |
nvmlDeviceSetMigMode |
3 | 0 | 0 |
func |
nvmlVgpuTypeGetLicense |
2 | 0 | 0 |
func |
nvmlDeviceGetPowerManagementMode |
2 | 0 | 0 |
func |
nvmlEventSetWait |
3 | 0 | 0 |
func |
nvmlVgpuTypeGetMaxInstances |
3 | 0 | 0 |
func |
nvmlDeviceGetPgpuMetadataString |
2 | 0 | 0 |
func |
nvmlDeviceSetEccMode |
3 | 0 | 0 |
func |
nvmlDeviceSetGpcClkVfOffset |
3 | 0 | 0 |
func |
nvmlDeviceRegisterEvents |
4 | 0 | 0 |
func |
nvmlDeviceGetFanControlPolicy_v2 |
4 | 0 | 0 |
func |
nvmlVgpuInstanceGetEncoderSessions |
2 | 0 | 0 |
func |
nvmlSystemGetConfComputeGpusReadyState |
1 | 0 | 0 |
func |
nvmlDeviceGetProcessesUtilizationInfo |
3 | 0 | 0 |
func |
nvmlDeviceGetMemoryAffinity |
4 | 0 | 0 |
func |
nvmlDeviceGetPciInfo_v3 |
2 | 0 | 0 |
func |
nvmlVgpuTypeGetFbReservation |
2 | 0 | 0 |
func |
nvmlDeviceCreateGpuInstanceWithPlacement |
4 | 0 | 0 |
func |
nvmlDeviceGetConfComputeGpuCertificate |
2 | 0 | 0 |
func |
nvmlDevicePowerSmoothingActivatePresetProfile |
3 | 0 | 0 |
func |
nvmlDeviceGetDynamicPstatesInfo |
3 | 0 | 0 |
func |
nvmlDeviceGetNumFans |
2 | 0 | 0 |
func |
nvmlDeviceGetDetailedEccErrors |
4 | 0 | 0 |
func |
nvmlDeviceGetVgpuSchedulerCapabilities |
2 | 0 | 0 |
meth |
c_nvmlGpuFabricInfoV_t.init |
1 | 0 | 0 |
func |
nvmlUnitGetDeviceCount |
2 | 0 | 0 |
func |
nvmlEventSetFree |
2 | 0 | 0 |
func |
nvmlVgpuTypeGetFramebufferSize |
2 | 0 | 0 |
func |
nvmlUnitGetHandleByIndex |
2 | 0 | 0 |
func |
nvmlGpuInstanceDestroy |
2 | 0 | 0 |
func |
nvmlDeviceGetVgpuHeterogeneousMode |
2 | 0 | 0 |
func |
nvmlDeviceSetVgpuHeterogeneousMode |
3 | 0 | 0 |
func |
nvmlDeviceGetMinMaxFanSpeed |
4 | 0 | 0 |
func |
nvmlDeviceGetGpuMaxPcieLinkGeneration |
2 | 0 | 0 |
func |
nvmlDeviceGetDecoderUtilization |
2 | 0 | 0 |
meth |
nvmlFriendlyObject.init |
2 | 0 | 0 |
meth |
nvmlFriendlyObject.str |
1 | 0 | 0 |
func |
nvmlDeviceGetTemperatureThreshold |
3 | 0 | 0 |
func |
nvmlDeviceGetRetiredPages_v2 |
3 | 0 | 0 |
func |
nvmlDeviceDiscoverGpus |
2 | 0 | 0 |
func |
nvmlUnitGetLedState |
2 | 0 | 0 |
func |
nvmlDeviceGetCudaComputeCapability |
2 | 0 | 0 |
func |
nvmlDeviceGetMemoryBusWidth |
2 | 0 | 0 |
func |
nvmlVgpuInstanceGetAccountingPids |
2 | 0 | 0 |
func |
nvmlVgpuInstanceGetAccountingMode |
2 | 0 | 0 |
func |
nvmlDeviceGetVbiosVersion |
2 | 0 | 0 |
func |
nvmlDeviceGetGpuInstanceRemainingCapacity |
3 | 0 | 0 |
func |
nvmlDeviceGetEccMode |
2 | 0 | 0 |
func |
nvmlGetVgpuCompatibility |
3 | 0 | 0 |
func |
nvmlVgpuInstanceGetEncoderCapacity |
2 | 0 | 0 |
func |
nvmlDeviceGetBrand |
2 | 0 | 0 |
func |
nvmlSystemSetConfComputeGpusReadyState |
2 | 0 | 0 |
func |
nvmlDeviceGetHandleByUUID |
2 | 0 | 0 |
func |
nvmlDeviceGetAdaptiveClockInfoStatus |
2 | 0 | 0 |
func |
nvmlDeviceGetName |
2 | 0 | 0 |
attr |
NVML_VGPU_METADATA_OPAQUE_DATA_SIZE |
1 | 0 | 0 |
func |
nvmlDeviceGetCurrentEccMode |
2 | 0 | 0 |
func |
nvmlDeviceGetVgpuSchedulerLog |
2 | 0 | 0 |
func |
nvmlDeviceGetPowerManagementDefaultLimit |
2 | 0 | 0 |
func |
nvmlDeviceGetMemoryInfo |
3 | 0 | 0 |
func |
nvmlDeviceSetNvLinkDeviceLowPowerThreshold |
3 | 0 | 0 |
func |
nvmlDeviceGetConfComputeProtectedMemoryUsage |
2 | 0 | 0 |
func |
nvmlDeviceGetGraphicsRunningProcesses_v2 |
2 | 0 | 0 |
func |
nvmlDeviceGetOfaUtilization |
2 | 0 | 0 |
func |
nvmlVgpuInstanceGetGpuInstanceId |
2 | 0 | 0 |
func |
nvmlVgpuInstanceGetType |
2 | 0 | 0 |
func |
nvmlUnitSetLedState |
3 | 0 | 0 |
func |
nvmlErrorString |
2 | 0 | 0 |
func |
nvmlDeviceGetGspFirmwareVersion |
3 | 0 | 0 |
func |
nvmlDeviceGetVgpuProcessesUtilizationInfo |
3 | 0 | 0 |
func |
nvmlDeviceGetFBCStats |
2 | 0 | 0 |
func |
nvmlGetExcludedDeviceCount |
1 | 0 | 0 |
func |
nvmlDeviceCreateGpuInstance |
3 | 0 | 0 |
func |
nvmlDeviceGetDriverModel |
2 | 0 | 0 |
func |
nvmlDeviceResetGpuLockedClocks |
2 | 0 | 0 |
func |
nvmlDeviceResetMemoryLockedClocks |
2 | 0 | 0 |
func |
nvmlDeviceResetApplicationsClocks |
2 | 0 | 0 |
func |
nvmlDeviceGetActiveVgpus |
2 | 0 | 0 |
func |
nvmlDeviceGetDefaultEccMode |
2 | 0 | 0 |
func |
nvmlDeviceGetPcieLinkMaxSpeed |
2 | 0 | 0 |
func |
nvmlDeviceGetTopologyCommonAncestor |
3 | 0 | 0 |
func |
nvmlDeviceGetPowerManagementLimitConstraints |
2 | 0 | 0 |
func |
nvmlVgpuInstanceGetGpuPciId |
2 | 0 | 0 |
func |
nvmlDeviceGetMinMaxClockOfPState |
6 | 0 | 0 |
func |
nvmlDeviceGetGraphicsRunningProcesses_v3 |
2 | 0 | 0 |
func |
nvmlDeviceModifyDrainState |
3 | 0 | 0 |
func |
nvmlDeviceGetDefaultApplicationsClock |
3 | 0 | 0 |
attr |
c_nvmlGpuInstance_t |
1 | 0 | 0 |
attr |
c_nvmlEventSet_t |
1 | 0 | 0 |
func |
nvmlDeviceGetConfComputeGpuAttestationReport |
3 | 0 | 0 |
func |
nvmlDeviceGetSupportedGraphicsClocks |
3 | 0 | 0 |
func |
nvmlDeviceGetMinorNumber |
2 | 0 | 0 |
func |
nvmlSystemGetConfComputeState |
1 | 0 | 0 |
func |
nvmlDeviceGetPciInfoExt |
3 | 0 | 0 |
func |
nvmlDeviceSetComputeMode |
3 | 0 | 0 |
func |
nvmlDeviceGetNvLinkVersion |
3 | 0 | 0 |
func |
nvmlDeviceSetDriverModel |
3 | 0 | 0 |
func |
nvmlDeviceGetGraphicsRunningProcesses |
2 | 0 | 0 |
func |
nvmlDeviceGetC2cModeInfoV |
2 | 0 | 0 |
func |
throwOnVersionMismatch |
2 | 0 | 0 |
func |
nvmlDeviceGetIrqNum |
2 | 0 | 0 |
func |
nvmlVgpuTypeGetDeviceID |
2 | 0 | 0 |
func |
nvmlDeviceSetClockOffsets |
3 | 0 | 0 |
func |
nvmlDeviceSetPowerManagementLimit |
3 | 0 | 0 |
func |
nvmlDeviceGetRetiredPages |
3 | 0 | 0 |
func |
nvmlDeviceGetTopologyNearestGpus |
3 | 0 | 0 |
func |
nvmlDeviceGetGridLicensableFeatures |
2 | 0 | 0 |
func |
nvmlDeviceGetMaxPcieLinkGeneration |
2 | 0 | 0 |
func |
nvmlDeviceGetCurrentDriverModel |
2 | 0 | 0 |
func |
nvmlDeviceSetNvLinkUtilizationControl |
6 | 0 | 0 |
func |
nvmlDeviceGetJpgUtilization |
2 | 0 | 0 |
func |
nvmlGpuInstanceCreateComputeInstance |
3 | 0 | 0 |
func |
nvmlVgpuTypeGetNumDisplayHeads |
2 | 0 | 0 |
meth |
c_nvmlNvlinkSetBwMode_v1_t.init |
1 | 0 | 0 |
func |
nvmlSystemSetConfComputeKeyRotationThresholdInfo |
2 | 0 | 0 |
func |
nvmlVgpuTypeGetFrameRateLimit |
2 | 0 | 0 |
func |
nvmlDeviceGetHostVgpuMode |
2 | 0 | 0 |
func |
nvmlDeviceGetPerformanceModes |
2 | 0 | 0 |
func |
nvmlDeviceGetSamples |
4 | 0 | 0 |
func |
nvmlSetVgpuVersion |
2 | 0 | 0 |
meth |
c_nvmlComputeInstanceProfileInfo_v2_t.init |
1 | 0 | 0 |
func |
nvmlDeviceSetFanSpeed_v2 |
4 | 0 | 0 |
func |
nvmlDeviceGetPcieReplayCounter |
2 | 0 | 0 |
func |
nvmlDeviceGetPciInfo |
2 | 0 | 0 |
attr |
c_nvmlUnit_t |
1 | 0 | 0 |
func |
nvmlDeviceGetAttributes |
2 | 0 | 0 |
func |
nvmlDeviceGetPersistenceMode |
2 | 0 | 0 |
attr |
c_nvmlDevice_t |
1 | 0 | 0 |
func |
nvmlVgpuTypeGetName |
2 | 0 | 0 |
func |
nvmlDeviceGetAccountingBufferSize |
2 | 0 | 0 |
func |
nvmlGpuInstanceGetInfo |
2 | 0 | 0 |
func |
nvmlGpmSampleFree |
2 | 0 | 0 |
func |
nvmlDeviceGetAccountingStats |
3 | 0 | 0 |
func |
nvmlDeviceGetMaxMigDeviceCount |
2 | 0 | 0 |
func |
nvmlDeviceGetMigMode |
2 | 0 | 0 |
func |
nvmlDeviceGetMaxClockInfo |
3 | 0 | 0 |
func |
nvmlVgpuTypeGetGspHeapSize |
2 | 0 | 0 |
func |
nvmlGpmSampleAlloc |
1 | 0 | 0 |
meth |
c_nvmlWorkloadPowerProfileRequestedProfiles_v1_t.init |
1 | 0 | 0 |
func |
nvmlDeviceGetDisplayActive |
2 | 0 | 0 |
func |
nvmlDeviceGetNvlinkSupportedBwModes |
3 | 0 | 0 |
func |
nvmlDeviceGetRowRemapperHistogram |
2 | 0 | 0 |
func |
nvmlDeviceGetComputeMode |
2 | 0 | 0 |
func |
nvmlDeviceGetNvlinkBwMode |
3 | 0 | 0 |
func |
nvmlDeviceGetApplicationsClock |
3 | 0 | 0 |
meth |
c_nvmlPowerSmoothingProfile_v1_t.init |
1 | 0 | 0 |
func |
nvmlDeviceGetBoardPartNumber |
2 | 0 | 0 |
func |
nvmlDeviceGetNvLinkRemotePciInfo |
3 | 0 | 0 |
meth |
c_nvmlPlatformInfo_v1_t.init |
1 | 0 | 0 |
func |
nvmlSystemGetProcessName |
2 | 0 | 0 |
func |
nvmlSystemGetDriverVersion |
1 | 0 | 0 |
func |
nvmlDeviceGetDisplayMode |
2 | 0 | 0 |
func |
nvmlDeviceGetFanSpeedRPM |
2 | 0 | 0 |
func |
nvmlDeviceFreezeNvLinkUtilizationCounter |
5 | 0 | 0 |
func |
nvmlDeviceGetGpuInstanceId |
2 | 0 | 0 |
attr |
c_nvmlComputeInstance_t |
1 | 0 | 0 |
func |
nvmlFriendlyObjectToStruct |
3 | 0 | 0 |
func |
nvmlDeviceGetEncoderStats |
2 | 0 | 0 |
func |
nvmlDeviceSetPowerManagementLimit_v2 |
5 | 0 | 0 |
func |
nvmlGpuInstanceGetComputeInstanceRemainingCapacity |
3 | 0 | 0 |
func |
nvmlDeviceGetMaxCustomerBoostClock |
3 | 0 | 0 |
meth |
c_nvmlNvlinkGetBwMode_v1_t.init |
1 | 0 | 0 |
func |
nvmlDeviceSetFanControlPolicy |
4 | 0 | 0 |
func |
nvmlDeviceGetCpuAffinityWithinScope |
4 | 0 | 0 |
func |
nvmlDeviceGetNvLinkErrorCounter |
4 | 0 | 0 |
func |
nvmlVgpuInstanceGetFrameRateLimit |
2 | 0 | 0 |
func |
nvmlEventSetCreate |
1 | 0 | 0 |
func |
nvmlDeviceSetTemperatureThreshold |
4 | 0 | 0 |
func |
nvmlDeviceClearFieldValues |
3 | 0 | 0 |
func |
nvmlDeviceGetComputeRunningProcesses_v2 |
2 | 0 | 0 |
func |
nvmlDeviceWorkloadPowerProfileGetCurrentProfiles |
3 | 0 | 0 |
func |
nvmlDeviceGetComputeRunningProcesses_v3 |
2 | 0 | 0 |
func |
nvmlDeviceGetSupportedEventTypes |
2 | 0 | 0 |
func |
nvmlGpuInstanceGetComputeInstanceProfileInfo |
5 | 0 | 0 |
func |
nvmlVgpuInstanceGetRuntimeStateSize |
2 | 0 | 0 |
func |
nvmlDeviceGetConfComputeMemSizeInfo |
2 | 0 | 0 |
func |
nvmlDeviceGetVgpuInstancesUtilizationInfo |
3 | 0 | 0 |
meth |
c_nvmlWorkloadPowerProfileProfilesInfo_v1_t.init |
1 | 0 | 0 |
func |
nvmlDeviceGetGpuInstanceProfileInfo |
4 | 0 | 0 |
func |
nvmlVgpuInstanceGetFbUsage |
2 | 0 | 0 |
func |
nvmlDeviceGetHandleByIndex |
2 | 0 | 0 |
attr |
c_nvmlVgpuPlacementList_v1_t.fields |
1 | 0 | 0 |
func |
nvmlDeviceGetGpuOperationMode |
2 | 0 | 0 |
func |
nvmlDeviceGetTemperatureV1 |
3 | 0 | 0 |
func |
nvmlDeviceGetThermalSettings |
4 | 0 | 0 |
func |
nvmlDeviceGetBusType |
2 | 0 | 0 |
func |
nvmlDeviceSetGpuLockedClocks |
4 | 0 | 0 |
func |
nvmlSystemGetDriverBranch |
1 | 0 | 0 |
func |
nvmlGetVgpuDriverCapabilities |
2 | 0 | 0 |
func |
nvmlGpuInstanceGetComputeInstancePossiblePlacements |
5 | 0 | 0 |
func |
nvmlVgpuInstanceGetPlacementId |
2 | 0 | 0 |
func |
nvmlVgpuInstanceGetEncoderStats |
2 | 0 | 0 |
func |
nvmlComputeInstanceGetInfo |
2 | 0 | 0 |
func |
nvmlDeviceGetCurrentClockFreqs |
2 | 0 | 0 |
func |
nvmlDeviceGetClockOffsets |
3 | 0 | 0 |
func |
nvmlDeviceSetAccountingMode |
3 | 0 | 0 |
func |
nvmlSystemGetConfComputeKeyRotationThresholdInfo |
1 | 0 | 0 |
func |
nvmlDeviceGetNumaNodeId |
2 | 0 | 0 |
func |
nvmlDeviceGetLastBBXFlushTime |
2 | 0 | 0 |
func |
nvmlDeviceGetMarginTemperature |
2 | 0 | 0 |
func |
nvmlDeviceGetCurrentGpuOperationMode |
2 | 0 | 0 |
func |
nvmlDeviceGetC2cModeInfoV1 |
2 | 0 | 0 |
func |
nvmlDeviceGetVirtualizationMode |
2 | 0 | 0 |
func |
nvmlDeviceGetUtilizationRates |
2 | 0 | 0 |
func |
nvmlDeviceGetDramEncryptionMode |
2 | 0 | 0 |
func |
nvmlDeviceGetCurrentClocksEventReasons |
2 | 0 | 0 |
func |
nvmlDeviceGetModuleId |
3 | 0 | 0 |
func |
nvmlDeviceGetCurrentClocksThrottleReasons |
2 | 0 | 0 |
func |
nvmlDeviceGetCpuAffinity |
3 | 0 | 0 |
func |
nvmlGpuInstanceGetComputeInstanceById |
3 | 0 | 0 |
func |
nvmlDeviceGetSupportedVgpus |
2 | 0 | 0 |
func |
nvmlDeviceGetPendingDriverModel |
2 | 0 | 0 |
func |
nvmlDeviceGetGpuFabricInfoV |
3 | 0 | 0 |
func |
nvmlDeviceGetCount |
1 | 0 | 0 |
func |
nvmlGpuInstanceCreateComputeInstanceWithPlacement |
4 | 0 | 0 |
func |
nvmlExceptionClass |
2 | 0 | 0 |
func |
nvmlDeviceGetAttributes_v2 |
2 | 0 | 0 |
func |
nvmlDeviceGetEncoderSessions |
2 | 0 | 0 |
meth |
NVMLError.new |
2 | 0 | 0 |
meth |
NVMLError.str |
1 | 0 | 0 |
meth |
NVMLError.eq |
2 | 0 | 0 |
attr |
NVMLError._valClassMapping |
1 | 0 | 0 |
meth |
c_nvmlEccSramErrorStatus_v1_t.init |
1 | 0 | 0 |
func |
nvmlDeviceSetVgpuCapabilities |
4 | 0 | 0 |
func |
nvmlStructToFriendlyObject |
2 | 0 | 0 |
func |
nvmlDeviceSetCpuAffinity |
2 | 0 | 0 |
func |
nvmlDeviceGetBridgeChipInfo |
2 | 0 | 0 |
meth |
c_nvmlGpuInstanceProfileInfo_v2_t.init |
1 | 0 | 0 |
func |
nvmlDeviceSetConfComputeUnprotectedMemSize |
3 | 0 | 0 |
func |
nvmlGpmMigSampleGet |
4 | 0 | 0 |
func |
nvmlDeviceGetEncoderUtilization |
2 | 0 | 0 |
func |
nvmlDeviceGetHandleByPciBusId |
2 | 0 | 0 |
func |
nvmlVgpuTypeGetClass |
2 | 0 | 0 |
func |
nvmlVgpuInstanceGetLicenseInfo_v2 |
2 | 0 | 0 |
func |
nvmlDeviceGetVgpuSchedulerState |
2 | 0 | 0 |
func |
nvmlDeviceGetTotalEnergyConsumption |
2 | 0 | 0 |
func |
nvmlDeviceWorkloadPowerProfileClearRequestedProfiles |
3 | 0 | 0 |
func |
nvmlVgpuInstanceGetMdevUUID |
2 | 0 | 0 |
func |
nvmlDeviceGetNumGpuCores |
2 | 0 | 0 |
func |
nvmlDeviceSetNvlinkBwMode |
3 | 0 | 0 |
func |
nvmlInit |
1 | 0 | 0 |
func |
nvmlDeviceGetEncoderCapacity |
3 | 0 | 0 |
func |
nvmlDeviceResetNvLinkUtilizationCounter |
4 | 0 | 0 |
func |
nvmlDeviceGetTemperatureV |
4 | 0 | 0 |
meth |
c_nvmlDramEncryptionInfo_t.init |
1 | 0 | 0 |
func |
nvmlVgpuInstanceGetAccountingStats |
3 | 0 | 0 |
func |
nvmlVgpuTypeGetBAR1Info |
2 | 0 | 0 |
func |
nvmlDeviceGetSramEccErrorStatus |
3 | 0 | 0 |
func |
nvmlDeviceGetIndex |
2 | 0 | 0 |
func |
nvmlDeviceGetPowerManagementLimit |
2 | 0 | 0 |
func |
nvmlInitWithFlags |
2 | 0 | 0 |
func |
nvmlVgpuTypeGetResolution |
2 | 0 | 0 |
func |
nvmlUnitGetPsuInfo |
2 | 0 | 0 |
func |
nvmlComputeInstanceGetInfo_v2 |
2 | 0 | 0 |
func |
nvmlDeviceGetGpuInstances |
5 | 0 | 0 |
func |
nvmlDeviceGetPcieSpeed |
2 | 0 | 0 |
func |
nvmlDeviceGetVgpuCapabilities |
3 | 0 | 0 |
vllm.third_party.triton_kernels.compaction (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
compaction |
5 | 0 | 0 |
func |
compaction_torch |
5 | 3 | 0 |
vllm.third_party.triton_kernels.distributed (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
make_expt_assignment |
5 | 2 | 0 |
func |
make_expt_dict_random |
3 | 0 | 0 |
attr |
symm_mem_pool |
1 | 0 | 0 |
func |
make_expt_dict_uniform |
3 | 0 | 0 |
meth |
SymmetricMemoryPool.init |
1 | 0 | 0 |
attr |
SymmetricMemoryPool.size |
1 | 0 | 0 |
attr |
SymmetricMemoryPool.buf |
1 | 0 | 0 |
attr |
SymmetricMemoryPool.bufs |
1 | 0 | 0 |
attr |
SymmetricMemoryPool.hdl |
1 | 0 | 0 |
attr |
SymmetricMemoryPool.regions |
1 | 0 | 0 |
func |
convert_dp_to_ep |
5 | 0 | 0 |
func |
convert_ep_to_dp |
5 | 0 | 0 |
vllm.third_party.triton_kernels.matmul_ogs (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
matmul_ogs_set_idle_sms |
2 | 0 | 0 |
func |
get_swap_xw |
3 | 0 | 0 |
func |
post_matmul_comm_torch |
6 | 5 | 0 |
func |
init_allocation |
11 | 0 | 0 |
func |
apply_allocation |
3 | 1 | 0 |
func |
matmul_ogs |
17 | 13 | 0 |
func |
can_overflow_int32 |
2 | 1 | 0 |
meth |
RoutingData.n_blocks |
3 | 0 | 0 |
func |
should_upcast_indices |
2 | 0 | 0 |
func |
matmul_ogs_torch |
13 | 5 | 0 |
meth |
InnerRoutingData.make_kernel_args |
3 | 0 | 0 |
attr |
specializations |
1 | 0 | 0 |
vllm.third_party.triton_kernels.numerics (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseFlexData.view |
2 | 1 | 0 |
meth |
BaseFlexData.reinterpret |
2 | 0 | 0 |
prop |
InFlexData.is_per_batch |
1 | 0 | 0 |
meth |
OutFlexData.iter |
1 | 0 | 0 |
prop |
OutFlexData.is_per_batch |
1 | 0 | 0 |
vllm.third_party.triton_kernels.numerics_details.flexpoint (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
TL_MAX_FINITE_FLOAT8E4NV |
1 | 0 | 0 |
func |
flex_to_float |
3 | 0 | 0 |
attr |
TL_RCP_MAX_FINITE_FLOAT16 |
1 | 0 | 0 |
func |
load_scale |
2 | 0 | 0 |
func |
sm86_min_nan_xorsign_abs_f32 |
3 | 0 | 0 |
func |
update_scale |
4 | 1 | 0 |
attr |
TL_MAX_FINITE_FLOAT8E5 |
1 | 0 | 0 |
attr |
TL_RCP_MAX_FINITE_FLOAT8E4B15 |
1 | 0 | 0 |
attr |
TL_RCP_MAX_FINITE_FLOAT8E5 |
1 | 0 | 0 |
attr |
TL_RCP_MAX_FINITE_FLOAT8E4NV |
1 | 0 | 0 |
func |
sm86_max_nan_xorsign_abs_f32 |
3 | 0 | 0 |
attr |
TL_RCP_MAX_FINITE_FLOAT8E4B8 |
1 | 0 | 0 |
func |
float_to_flex |
8 | 1 | 0 |
func |
max_finite |
2 | 0 | 0 |
func |
compute_scale |
3 | 0 | 0 |
func |
nan_propagating_absmax_reduce |
3 | 0 | 0 |
attr |
TL_MAX_FINITE_FLOAT16 |
1 | 0 | 0 |
attr |
TL_MAX_FINITE_FLOAT8E4B8 |
1 | 0 | 0 |
func |
clip |
3 | 0 | 0 |
func |
rcp_max_finite |
2 | 0 | 0 |
attr |
TL_MAX_FINITE_FLOAT8E4B15 |
1 | 0 | 0 |
vllm.third_party.triton_kernels.numerics_details.mxfp (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
downcast_to_mxfp_torch |
5 | 4 | 0 |
func |
upcast_from_mxfp_torch |
5 | 4 | 0 |
func |
upcast_from_mxfp |
5 | 4 | 0 |
func |
right_shift_unsigned |
3 | 0 | 0 |
func |
downcast_to_mxfp |
5 | 4 | 0 |
func |
get_max_quant_val |
2 | 1 | 0 |
func |
cvt_e2m1_to_fp32 |
2 | 0 | 0 |
vllm.third_party.triton_kernels.proton_opts (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
set_launch_metadata_allow_sync |
2 | 1 | 0 |
func |
launch_metadata_allow_sync |
1 | 0 | 0 |
vllm.third_party.triton_kernels.reduce (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
reduce_torch |
10 | 9 | 0 |
func |
compute_actual_scale |
4 | 0 | 0 |
attr |
specializations |
1 | 0 | 0 |
vllm.third_party.triton_kernels.roofline (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
write_csv |
4 | 0 | 0 |
func |
parse_profile |
3 | 0 | 0 |
func |
load_perf_csv |
2 | 0 | 0 |
func |
compute_roofline |
8 | 0 | 0 |
func |
get_cublas_tflops |
2 | 0 | 0 |
func |
plot_roofline |
10 | 0 | 0 |
attr |
parser |
1 | 0 | 0 |
attr |
args |
1 | 0 | 0 |
func |
validate_perfs |
2 | 0 | 0 |
func |
get_memset_tbps |
1 | 0 | 0 |
vllm.third_party.triton_kernels.specialize (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
specialize |
7 | 0 | 0 |
meth |
SpecializationModule.init |
4 | 3 | 0 |
meth |
SpecializationModule.get |
2 | 0 | 0 |
attr |
SpecializationModule.module_name |
1 | 0 | 0 |
attr |
SpecializationModule.kernels |
1 | 0 | 0 |
attr |
SpecializationModule.closure_args |
1 | 0 | 0 |
func |
cacheable |
2 | 0 | 0 |
func |
define_kernel |
5 | 0 | 0 |
meth |
FnSpecs.default |
1 | 0 | 0 |
vllm.third_party.triton_kernels.swiglu (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
swiglu_torch |
4 | 0 | 0 |
func |
swiglu |
5 | 0 | 0 |
meth |
SwiGLU.forward |
6 | 0 | 0 |
vllm.third_party.triton_kernels.target_info (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
num_sms |
1 | 0 | 0 |
func |
has_native_mxfp |
1 | 0 | 0 |
func |
get_cdna_version |
1 | 0 | 0 |
func |
has_tma_gather |
1 | 0 | 0 |
vllm.third_party.triton_kernels.tensor (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FloatType.post_init |
1 | 0 | 0 |
meth |
SparseMatrix.post_init |
1 | 0 | 0 |
func |
bitwidth |
2 | 1 | 0 |
meth |
Tensor.post_init |
1 | 0 | 0 |
meth |
Tensor.stride |
2 | 0 | 0 |
meth |
Tensor.data_ptr |
1 | 0 | 0 |
meth |
Tensor.numel |
1 | 0 | 0 |
meth |
Tensor.element_size |
1 | 0 | 0 |
meth |
Tensor.dim |
1 | 0 | 0 |
meth |
Tensor.size |
2 | 0 | 0 |
prop |
Tensor.ndim |
1 | 0 | 0 |
prop |
Tensor.device |
1 | 0 | 0 |
prop |
Tensor.data |
1 | 0 | 0 |
func |
convert_layout |
4 | 2 | 0 |
meth |
Bitmatrix.post_init |
1 | 0 | 0 |
func |
wrap_torch_tensor |
3 | 0 | 0 |
meth |
Storage.post_init |
1 | 0 | 0 |
meth |
Storage.is_tma_compliant |
1 | 0 | 0 |
meth |
Storage.make_dense_tma |
3 | 0 | 0 |
meth |
Storage.make_tma |
4 | 0 | 0 |
prop |
Storage.device |
1 | 0 | 0 |
attr |
BIT |
1 | 0 | 0 |
attr |
FP4 |
1 | 0 | 0 |
func |
get_layout |
2 | 1 | 0 |
vllm.third_party.triton_kernels.testing (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
assert_equal |
3 | 0 | 0 |
func |
compute_sanitizer |
2 | 0 | 0 |
func |
assert_close |
7 | 0 | 0 |
func |
compute_actual_scale |
4 | 0 | 0 |
vllm.third_party.triton_kernels.topk (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
make_empty |
6 | 0 | 0 |
func |
topk_forward |
8 | 0 | 0 |
meth |
TopK.forward |
9 | 0 | 0 |
meth |
TopK.backward |
5 | 0 | 0 |
func |
topk_torch |
7 | 5 | 0 |
func |
topk_backward |
7 | 0 | 0 |
func |
topk |
8 | 7 | 0 |
vllm.tokenizers.deepseek_v32 (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekV32Tokenizer.from_pretrained |
3 | 1 | 0 |
vllm.tokenizers.deepseek_v32_encoding (8 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
tool_calls_to_openai_format |
2 | 0 | 0 |
func |
tool_calls_from_openai_format |
2 | 0 | 0 |
func |
parse_tool_calls |
3 | 2 | 0 |
func |
parse_message_from_completion_text |
3 | 2 | 0 |
func |
to_json |
2 | 2 | 1 |
func |
tools_from_openai_format |
2 | 0 | 0 |
vllm.tokenizers.grok2 (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
RESERVED_TOKEN_TEXTS |
1 | 0 | 0 |
meth |
Grok2Tokenizer.from_pretrained |
7 | 5 | 0 |
meth |
Grok2Tokenizer.apply_chat_template |
6 | 5 | 0 |
attr |
Grok2Tokenizer.name_or_path |
1 | 0 | 0 |
attr |
Grok2Tokenizer.init_kwargs |
1 | 0 | 0 |
attr |
CONTROL_TOKEN_TEXTS |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tokenizers.hf (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CachedHfTokenizer.from_pretrained |
7 | 5 | 0 |
vllm.tokenizers.mistral (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
validate_request_params |
2 | 1 | 0 |
meth |
MistralTokenizer.from_pretrained |
7 | 5 | 0 |
meth |
MistralTokenizer.apply_chat_template |
4 | 3 | 0 |
attr |
MistralTokenizer.transformers_tokenizer |
1 | 0 | 0 |
attr |
MistralTokenizer.mistral |
1 | 0 | 0 |
attr |
MistralTokenizer.instruct |
1 | 0 | 0 |
attr |
MistralTokenizer.tokenizer |
1 | 0 | 0 |
attr |
MistralTokenizer.is_tekken |
1 | 0 | 0 |
attr |
MistralTokenizer.is_spm |
1 | 0 | 0 |
func |
maybe_serialize_tool_calls |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
truncate_tool_call_ids |
2 | 1 | 0 |
vllm.tokenizers.qwen_vl (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QwenVLTokenizer.from_pretrained |
3 | 1 | 0 |
vllm.tokenizers.registry (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
cached_tokenizer_from_config |
3 | 1 | 0 |
attr |
TokenizerRegistry |
1 | 0 | 0 |
func |
get_tokenizer |
8 | 6 | 0 |
attr |
cached_get_tokenizer |
1 | 0 | 0 |
func |
resolve_tokenizer_args |
6 | 3 | 0 |
func |
tokenizer_args_from_config |
3 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
cached_resolve_tokenizer_args |
1 | 0 | 0 |
vllm.tool_parsers.abstract_tool_parser (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ToolParser.init |
2 | 1 | 0 |
attr |
ToolParser.model_tokenizer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.deepseekv31_tool_parser (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepSeekV31ToolParser.init |
2 | 1 | 0 |
attr |
DeepSeekV31ToolParser.tool_call_regex |
1 | 0 | 0 |
attr |
DeepSeekV31ToolParser.stream_tool_call_portion_regex |
1 | 0 | 0 |
attr |
DeepSeekV31ToolParser.stream_tool_call_name_regex |
1 | 0 | 0 |
attr |
DeepSeekV31ToolParser.tool_calls_start_token_id |
1 | 0 | 0 |
attr |
DeepSeekV31ToolParser.tool_calls_end_token_id |
1 | 0 | 0 |
attr |
DeepSeekV31ToolParser.tool_call_start_token_id |
1 | 0 | 0 |
attr |
DeepSeekV31ToolParser.tool_call_end_token_id |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.deepseekv32_tool_parser (8 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepSeekV32ToolParser.init |
2 | 1 | 0 |
meth |
DeepSeekV32ToolParser.adjust_request |
2 | 0 | 0 |
meth |
DeepSeekV32ToolParser._reset_streaming_state |
1 | 0 | 0 |
meth |
DeepSeekV32ToolParser._convert_param_value |
3 | 3 | 1 |
attr |
DeepSeekV32ToolParser.tool_call_complete_regex |
1 | 0 | 0 |
attr |
DeepSeekV32ToolParser.invoke_complete_regex |
1 | 0 | 0 |
attr |
DeepSeekV32ToolParser.parameter_complete_regex |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.deepseekv3_tool_parser (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepSeekV3ToolParser.init |
2 | 1 | 0 |
attr |
DeepSeekV3ToolParser.tool_call_regex |
1 | 0 | 0 |
attr |
DeepSeekV3ToolParser.stream_tool_call_portion_regex |
1 | 0 | 0 |
attr |
DeepSeekV3ToolParser.stream_tool_call_name_regex |
1 | 0 | 0 |
attr |
DeepSeekV3ToolParser.tool_calls_start_token_id |
1 | 0 | 0 |
attr |
DeepSeekV3ToolParser.tool_calls_end_token_id |
1 | 0 | 0 |
attr |
DeepSeekV3ToolParser.tool_call_start_token_id |
1 | 0 | 0 |
attr |
DeepSeekV3ToolParser.tool_call_end_token_id |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.ernie45_tool_parser (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
Ernie45ToolParser.init |
2 | 1 | 0 |
attr |
Ernie45ToolParser.current_tool_name_sent |
1 | 0 | 0 |
attr |
Ernie45ToolParser.current_tool_id |
1 | 0 | 0 |
attr |
Ernie45ToolParser.think_end_token |
1 | 0 | 0 |
attr |
Ernie45ToolParser.tool_call_start_token |
1 | 0 | 0 |
attr |
Ernie45ToolParser.tool_call_end_token |
1 | 0 | 0 |
attr |
Ernie45ToolParser.tool_calls_start_token |
1 | 0 | 0 |
attr |
Ernie45ToolParser.tool_call_regex |
1 | 0 | 0 |
attr |
Ernie45ToolParser.think_end_token_id |
1 | 0 | 0 |
attr |
Ernie45ToolParser.response_start_token_id |
1 | 0 | 0 |
attr |
Ernie45ToolParser.response_end_token_id |
1 | 0 | 0 |
attr |
Ernie45ToolParser.tool_call_start_token_id |
1 | 0 | 0 |
attr |
Ernie45ToolParser.tool_call_end_token_id |
1 | 0 | 0 |
attr |
Ernie45ToolParser.newline_token_id |
1 | 0 | 0 |
attr |
Ernie45ToolParser.parser_token_ids |
1 | 0 | 0 |
vllm.tool_parsers.functiongemma_tool_parser (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FunctionGemmaToolParser.init |
2 | 1 | 0 |
attr |
FunctionGemmaToolParser.tool_call_regex |
1 | 0 | 0 |
attr |
FunctionGemmaToolParser.arg_regex |
1 | 0 | 0 |
attr |
FunctionGemmaToolParser.buffered_delta_text |
1 | 0 | 0 |
attr |
FunctionGemmaToolParser.tool_call_start_token_ids |
1 | 0 | 0 |
attr |
FunctionGemmaToolParser.tool_call_end_token_ids |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.gigachat3_tool_parser (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
REGEX_FUNCTION_CALL |
1 | 0 | 0 |
attr |
ARGS_REGEX |
1 | 0 | 0 |
meth |
GigaChat3ToolParser.init |
2 | 1 | 0 |
attr |
GigaChat3ToolParser.trigger_start |
1 | 0 | 0 |
attr |
NAME_REGEX |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.glm47_moe_tool_parser (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
Glm47MoeModelToolParser.init |
2 | 1 | 0 |
attr |
Glm47MoeModelToolParser.func_detail_regex |
1 | 0 | 0 |
attr |
Glm47MoeModelToolParser.func_arg_regex |
1 | 0 | 0 |
vllm.tool_parsers.glm4_moe_tool_parser (8 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
Glm4MoeModelToolParser.init |
2 | 1 | 0 |
meth |
Glm4MoeModelToolParser._deserialize |
2 | 2 | 1 |
attr |
Glm4MoeModelToolParser.tool_calls_start_token |
1 | 0 | 0 |
attr |
Glm4MoeModelToolParser.func_call_regex |
1 | 0 | 0 |
attr |
Glm4MoeModelToolParser.func_detail_regex |
1 | 0 | 0 |
attr |
Glm4MoeModelToolParser.func_arg_regex |
1 | 0 | 0 |
attr |
Glm4MoeModelToolParser.tool_call_start_token_id |
1 | 0 | 0 |
attr |
Glm4MoeModelToolParser.tool_call_end_token_id |
1 | 0 | 0 |
vllm.tool_parsers.granite_20b_fc_tool_parser (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
Granite20bFCToolParser.init |
2 | 1 | 0 |
attr |
Granite20bFCToolParser.bot_token |
1 | 0 | 0 |
attr |
Granite20bFCToolParser.tool_start_token |
1 | 0 | 0 |
attr |
Granite20bFCToolParser.tool_call_regex |
1 | 0 | 0 |
vllm.tool_parsers.granite_tool_parser (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteToolParser.init |
2 | 1 | 0 |
attr |
GraniteToolParser.bot_token |
1 | 0 | 0 |
attr |
GraniteToolParser.bot_string |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.hermes_tool_parser (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Hermes2ProToolParser.init |
2 | 1 | 0 |
meth |
Hermes2ProToolParser.tool_call_delta_buffer |
2 | 1 | 0 |
attr |
Hermes2ProToolParser.tool_call_regex |
1 | 0 | 0 |
attr |
Hermes2ProToolParser.scratch_pad_regex |
1 | 0 | 0 |
attr |
Hermes2ProToolParser.tool_call_start_token_ids |
1 | 0 | 0 |
attr |
Hermes2ProToolParser.tool_call_end_token_ids |
1 | 0 | 0 |
attr |
Hermes2ProToolParser.tool_call_start_token_array |
1 | 0 | 0 |
attr |
Hermes2ProToolParser.tool_call_end_token_array |
1 | 0 | 0 |
attr |
Hermes2ProToolParser.buffered_delta_text |
1 | 0 | 0 |
attr |
Hermes2ProToolParser.model_tokenizer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.hunyuan_a13b_tool_parser (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
HunyuanA13BToolParser.init |
2 | 1 | 0 |
meth |
HunyuanA13BToolParser._try_parse_json_tools |
2 | 1 | 0 |
meth |
HunyuanA13BToolParser._handle_test_compatibility |
2 | 1 | 0 |
meth |
HunyuanA13BToolParser._ensure_state_arrays |
2 | 1 | 0 |
meth |
HunyuanA13BToolParser._handle_tool_name_streaming |
4 | 2 | 0 |
meth |
HunyuanA13BToolParser._handle_tool_args_streaming |
4 | 3 | 0 |
attr |
HunyuanA13BToolParser.current_tool_id |
1 | 0 | 0 |
attr |
HunyuanA13BToolParser.current_tool_name_sent |
1 | 0 | 0 |
attr |
HunyuanA13BToolParser.prev_tool_call_arr |
1 | 0 | 0 |
attr |
HunyuanA13BToolParser.answer_tool_calls_pattern |
1 | 0 | 0 |
attr |
HunyuanA13BToolParser.tool_name_reg |
1 | 0 | 0 |
attr |
HunyuanA13BToolParser.tool_empty_arg_reg |
1 | 0 | 0 |
attr |
HunyuanA13BToolParser.tool_non_empty_arg_reg |
1 | 0 | 0 |
attr |
HunyuanA13BToolParser.bot_string |
1 | 0 | 0 |
vllm.tool_parsers.internlm2_tool_parser (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Internlm2ToolParser.init |
2 | 1 | 0 |
meth |
Internlm2ToolParser.get_arguments |
2 | 0 | 0 |
attr |
Internlm2ToolParser.position |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.jamba_tool_parser (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JambaToolParser.init |
2 | 1 | 0 |
attr |
JambaToolParser.tool_calls_regex |
1 | 0 | 0 |
attr |
JambaToolParser.tool_calls_start_token_id |
1 | 0 | 0 |
attr |
JambaToolParser.tool_calls_end_token_id |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.kimi_k2_tool_parser (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
KimiK2ToolParser.init |
2 | 1 | 0 |
attr |
KimiK2ToolParser.tool_call_regex |
1 | 0 | 0 |
attr |
KimiK2ToolParser.stream_tool_call_portion_regex |
1 | 0 | 0 |
attr |
KimiK2ToolParser.stream_tool_call_name_regex |
1 | 0 | 0 |
attr |
KimiK2ToolParser.tool_calls_start_token_id |
1 | 0 | 0 |
attr |
KimiK2ToolParser.tool_calls_end_token_id |
1 | 0 | 0 |
attr |
KimiK2ToolParser.tool_call_start_token_id |
1 | 0 | 0 |
attr |
KimiK2ToolParser.tool_call_end_token_id |
1 | 0 | 0 |
vllm.tool_parsers.llama4_pythonic_tool_parser (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Llama4PythonicToolParser.init |
2 | 1 | 0 |
attr |
Llama4PythonicToolParser.TOOL_CALL_REGEX |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.llama_tool_parser (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Llama3JsonToolParser.init |
2 | 1 | 0 |
attr |
Llama3JsonToolParser.bot_token |
1 | 0 | 0 |
attr |
Llama3JsonToolParser.bot_token_id |
1 | 0 | 0 |
attr |
Llama3JsonToolParser.tool_call_start_regex |
1 | 0 | 0 |
attr |
Llama3JsonToolParser.json_decoder |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.longcat_tool_parser (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LongcatFlashToolParser.init |
2 | 1 | 0 |
attr |
LongcatFlashToolParser.tool_call_regex |
1 | 0 | 0 |
attr |
LongcatFlashToolParser.tool_call_start_token_ids |
1 | 0 | 0 |
attr |
LongcatFlashToolParser.tool_call_end_token_ids |
1 | 0 | 0 |
attr |
LongcatFlashToolParser.tool_call_start_token_array |
1 | 0 | 0 |
attr |
LongcatFlashToolParser.tool_call_end_token_array |
1 | 0 | 0 |
vllm.tool_parsers.minimax_m2_tool_parser (8 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MinimaxM2ToolParser.init |
2 | 1 | 0 |
meth |
MinimaxM2ToolParser._reset_streaming_state |
1 | 0 | 0 |
meth |
MinimaxM2ToolParser._convert_param_value |
3 | 3 | 1 |
meth |
MinimaxM2ToolParser._extract_types_from_schema |
2 | 2 | 1 |
meth |
MinimaxM2ToolParser._convert_param_value_with_types |
3 | 3 | 1 |
attr |
MinimaxM2ToolParser.tool_call_complete_regex |
1 | 0 | 0 |
attr |
MinimaxM2ToolParser.invoke_complete_regex |
1 | 0 | 0 |
attr |
MinimaxM2ToolParser.parameter_complete_regex |
1 | 0 | 0 |
attr |
MinimaxM2ToolParser.tool_call_start_token_id |
1 | 0 | 0 |
attr |
MinimaxM2ToolParser.tool_call_end_token_id |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.minimax_tool_parser (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MinimaxToolParser.init |
2 | 1 | 0 |
attr |
MinimaxToolParser.tool_call_start_token |
1 | 0 | 0 |
attr |
MinimaxToolParser.tool_call_end_token |
1 | 0 | 0 |
attr |
MinimaxToolParser.tool_call_regex |
1 | 0 | 0 |
attr |
MinimaxToolParser.thinking_tag_pattern |
1 | 0 | 0 |
attr |
MinimaxToolParser.tool_name_pattern |
1 | 0 | 0 |
attr |
MinimaxToolParser.tool_args_pattern |
1 | 0 | 0 |
attr |
MinimaxToolParser.pending_buffer |
1 | 0 | 0 |
attr |
MinimaxToolParser.in_thinking_tag |
1 | 0 | 0 |
attr |
MinimaxToolParser.tool_call_start_token_id |
1 | 0 | 0 |
attr |
MinimaxToolParser.tool_call_end_token_id |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.mistral_tool_parser (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MistralToolParser.init |
2 | 1 | 0 |
meth |
MistralToolParser.update_stream_state_pre_v11_tokenizer |
1 | 0 | 0 |
meth |
MistralToolParser._split_delta |
8 | 7 | 0 |
attr |
MistralToolParser.starting_new_tool |
1 | 0 | 0 |
attr |
MistralToolParser.bot_token |
1 | 0 | 0 |
attr |
MistralToolParser.bot_token_id |
1 | 0 | 0 |
attr |
MistralToolParser.tool_call_regex |
1 | 0 | 0 |
attr |
MistralToolParser.parse_coro |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
MistralToolCall.generate_random_id |
1 | 0 | 0 |
vllm.tool_parsers.olmo3_tool_parser (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Olmo3PythonicToolParser.init |
2 | 1 | 0 |
attr |
Olmo3PythonicToolParser.TOOL_CALL_REGEX |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.openai_tool_parser (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OpenAIToolParser.init |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.phi4mini_tool_parser (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.pythonic_tool_parser (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PythonicToolParser.init |
2 | 1 | 0 |
attr |
PythonicToolParser.TOOL_CALL_REGEX |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.qwen3coder_tool_parser (9 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3CoderToolParser.init |
2 | 1 | 0 |
meth |
Qwen3CoderToolParser._reset_streaming_state |
1 | 0 | 0 |
meth |
Qwen3CoderToolParser._convert_param_value |
5 | 5 | 1 |
attr |
Qwen3CoderToolParser.tool_call_complete_regex |
1 | 0 | 0 |
attr |
Qwen3CoderToolParser.tool_call_regex |
1 | 0 | 0 |
attr |
Qwen3CoderToolParser.tool_call_function_regex |
1 | 0 | 0 |
attr |
Qwen3CoderToolParser.tool_call_parameter_regex |
1 | 0 | 0 |
attr |
Qwen3CoderToolParser.tool_call_start_token_id |
1 | 0 | 0 |
attr |
Qwen3CoderToolParser.tool_call_end_token_id |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tool_parsers.qwen3xml_tool_parser (13 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3XMLToolParser.init |
2 | 1 | 0 |
attr |
Qwen3XMLToolParser.parser |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
StreamingXMLToolCallParser.init |
1 | 0 | 0 |
meth |
StreamingXMLToolCallParser.reset_streaming_state |
1 | 0 | 0 |
meth |
StreamingXMLToolCallParser._emit_delta |
2 | 1 | 0 |
meth |
StreamingXMLToolCallParser._auto_close_open_parameter_if_needed |
2 | 1 | 0 |
meth |
StreamingXMLToolCallParser._start_element |
3 | 2 | 0 |
meth |
StreamingXMLToolCallParser._char_data |
2 | 1 | 0 |
meth |
StreamingXMLToolCallParser._end_element |
2 | 1 | 0 |
meth |
StreamingXMLToolCallParser.setup_parser |
1 | 0 | 0 |
meth |
StreamingXMLToolCallParser.set_tools |
2 | 1 | 0 |
meth |
StreamingXMLToolCallParser._convert_param_value |
3 | 3 | 1 |
meth |
StreamingXMLToolCallParser._convert_for_json_streaming |
3 | 3 | 1 |
meth |
StreamingXMLToolCallParser._reset_xml_parser_after_tool_call |
1 | 0 | 0 |
vllm.tool_parsers.seed_oss_tool_parser (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
SeedOssToolParser.init |
2 | 1 | 0 |
meth |
SeedOssToolParser._reset_streaming_state |
1 | 0 | 0 |
attr |
SeedOssToolParser.tool_call_start_token_id |
1 | 0 | 0 |
attr |
SeedOssToolParser.tool_call_end_token_id |
1 | 0 | 0 |
attr |
SeedOssToolParser.think_end_token_id |
1 | 0 | 0 |
attr |
SeedOssToolParser.tool_call_complete_regex |
1 | 0 | 0 |
attr |
SeedOssToolParser.tool_call_regex |
1 | 0 | 0 |
attr |
SeedOssToolParser.tool_call_function_regex |
1 | 0 | 0 |
attr |
SeedOssToolParser.tool_call_parameter_regex |
1 | 0 | 0 |
vllm.tool_parsers.step3_tool_parser (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
Step3ToolParser.init |
2 | 1 | 0 |
attr |
Step3ToolParser.position |
1 | 0 | 0 |
attr |
Step3ToolParser.tool_block_started |
1 | 0 | 0 |
attr |
Step3ToolParser.tool_block_finished |
1 | 0 | 0 |
vllm.tool_parsers.step3p5_tool_parser (13 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
Step3p5ToolParser.init |
2 | 1 | 0 |
attr |
Step3p5ToolParser.parser |
1 | 0 | 0 |
meth |
StreamingXMLToolCallParser.init |
1 | 0 | 0 |
meth |
StreamingXMLToolCallParser.reset_streaming_state |
1 | 0 | 0 |
meth |
StreamingXMLToolCallParser._emit_delta |
2 | 1 | 0 |
meth |
StreamingXMLToolCallParser._auto_close_open_parameter_if_needed |
2 | 1 | 0 |
meth |
StreamingXMLToolCallParser._start_element |
3 | 2 | 0 |
meth |
StreamingXMLToolCallParser._char_data |
2 | 1 | 0 |
meth |
StreamingXMLToolCallParser._end_element |
2 | 1 | 0 |
meth |
StreamingXMLToolCallParser.setup_parser |
1 | 0 | 0 |
meth |
StreamingXMLToolCallParser.set_tools |
2 | 1 | 0 |
meth |
StreamingXMLToolCallParser._convert_param_value |
3 | 3 | 1 |
meth |
StreamingXMLToolCallParser._convert_for_json_streaming |
3 | 3 | 1 |
meth |
StreamingXMLToolCallParser._reset_xml_parser_after_tool_call |
1 | 0 | 0 |
vllm.tool_parsers.xlam_tool_parser (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
xLAMToolParser.init |
2 | 1 | 0 |
attr |
xLAMToolParser.current_tool_id |
1 | 0 | 0 |
attr |
xLAMToolParser.current_tool_name_sent |
1 | 0 | 0 |
attr |
xLAMToolParser.prev_tool_call_arr |
1 | 0 | 0 |
attr |
xLAMToolParser.json_code_block_patterns |
1 | 0 | 0 |
attr |
xLAMToolParser.thinking_tag_pattern |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.tracing (4 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
init_tracer |
4 | 3 | 0 |
func |
instrument_manual |
7 | 6 | 2 |
func |
instrument |
5 | 4 | 0 |
func |
maybe_init_worker_tracer |
4 | 3 | 0 |
vllm.tracing.otel (10 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_span_exporter |
2 | 0 | 0 |
func |
instrument_otel |
5 | 0 | 0 |
func |
init_otel_worker_tracer |
4 | 4 | 1 |
func |
init_otel_tracer |
4 | 4 | 1 |
func |
propagate_trace_to_env |
1 | 0 | 0 |
func |
manual_instrument_otel |
7 | 6 | 1 |
attr |
logger |
1 | 0 | 0 |
vllm.tracing.utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.transformers_utils.chat_templates.registry (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.transformers_utils.config (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_config |
9 | 8 | 0 |
func |
try_get_safetensors_metadata |
3 | 2 | 0 |
func |
get_hf_text_config |
2 | 1 | 0 |
func |
parse_pooling_type |
2 | 1 | 0 |
meth |
HFConfigParser.parse |
6 | 5 | 0 |
meth |
LazyConfigDict.getitem |
2 | 0 | 0 |
meth |
MistralConfigParser.parse |
6 | 5 | 0 |
func |
register_config_parser |
2 | 1 | 0 |
func |
get_hf_image_processor_config |
5 | 4 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
maybe_override_with_speculators |
7 | 6 | 0 |
vllm.transformers_utils.config_parser_base (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConfigParserBase.parse |
6 | 5 | 0 |
vllm.transformers_utils.configs.AXK1 (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AXK1Config.init |
44 | 42 | 0 |
attr |
AXK1Config.vocab_size |
1 | 0 | 0 |
attr |
AXK1Config.max_position_embeddings |
1 | 0 | 0 |
attr |
AXK1Config.hidden_size |
1 | 0 | 0 |
attr |
AXK1Config.intermediate_size |
1 | 0 | 0 |
attr |
AXK1Config.moe_intermediate_size |
1 | 0 | 0 |
attr |
AXK1Config.num_hidden_layers |
1 | 0 | 0 |
attr |
AXK1Config.num_nextn_predict_layers |
1 | 0 | 0 |
attr |
AXK1Config.num_attention_heads |
1 | 0 | 0 |
attr |
AXK1Config.n_shared_experts |
1 | 0 | 0 |
attr |
AXK1Config.n_routed_experts |
1 | 0 | 0 |
attr |
AXK1Config.ep_size |
1 | 0 | 0 |
attr |
AXK1Config.routed_scaling_factor |
1 | 0 | 0 |
attr |
AXK1Config.kv_lora_rank |
1 | 0 | 0 |
attr |
AXK1Config.q_lora_rank |
1 | 0 | 0 |
attr |
AXK1Config.qk_rope_head_dim |
1 | 0 | 0 |
attr |
AXK1Config.v_head_dim |
1 | 0 | 0 |
attr |
AXK1Config.qk_nope_head_dim |
1 | 0 | 0 |
attr |
AXK1Config.topk_method |
1 | 0 | 0 |
attr |
AXK1Config.n_group |
1 | 0 | 0 |
attr |
AXK1Config.topk_group |
1 | 0 | 0 |
attr |
AXK1Config.num_experts_per_tok |
1 | 0 | 0 |
attr |
AXK1Config.moe_layer_freq |
1 | 0 | 0 |
attr |
AXK1Config.first_k_dense_replace |
1 | 0 | 0 |
attr |
AXK1Config.norm_topk_prob |
1 | 0 | 0 |
attr |
AXK1Config.scoring_func |
1 | 0 | 0 |
attr |
AXK1Config.aux_loss_alpha |
1 | 0 | 0 |
attr |
AXK1Config.seq_aux |
1 | 0 | 0 |
attr |
AXK1Config.num_key_value_heads |
1 | 0 | 0 |
attr |
AXK1Config.hidden_act |
1 | 0 | 0 |
attr |
AXK1Config.initializer_range |
1 | 0 | 0 |
attr |
AXK1Config.rms_norm_eps |
1 | 0 | 0 |
attr |
AXK1Config.pretraining_tp |
1 | 0 | 0 |
attr |
AXK1Config.use_cache |
1 | 0 | 0 |
attr |
AXK1Config.rope_theta |
1 | 0 | 0 |
attr |
AXK1Config.rope_scaling |
1 | 0 | 0 |
attr |
AXK1Config.rope_parameters |
1 | 0 | 0 |
attr |
AXK1Config.attention_bias |
1 | 0 | 0 |
attr |
AXK1Config.attention_dropout |
1 | 0 | 0 |
vllm.transformers_utils.configs (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
JAISConfig |
1 | 0 | 0 |
attr |
BagelConfig |
1 | 0 | 0 |
attr |
DeepseekV3Config |
1 | 0 | 0 |
attr |
FunAudioChatConfig |
1 | 0 | 0 |
attr |
Olmo3Config |
1 | 0 | 0 |
attr |
ColQwen3Config |
1 | 0 | 0 |
attr |
AXK1Config |
1 | 0 | 0 |
attr |
MLPSpeculatorConfig |
1 | 0 | 0 |
attr |
SpeculatorsConfig |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig |
1 | 0 | 0 |
attr |
EAGLEConfig |
1 | 0 | 0 |
attr |
IsaacConfig |
1 | 0 | 0 |
attr |
PixelShuffleSiglip2VisionConfig |
1 | 0 | 0 |
attr |
ChatGLMConfig |
1 | 0 | 0 |
attr |
RWConfig |
1 | 0 | 0 |
attr |
Step3VLConfig |
1 | 0 | 0 |
attr |
Tarsier2Config |
1 | 0 | 0 |
attr |
Qwen3_5Config |
1 | 0 | 0 |
attr |
ColModernVBertConfig |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig |
1 | 0 | 0 |
attr |
Qwen3VLNemotronEmbedConfig |
1 | 0 | 0 |
attr |
DeepseekVLV2Config |
1 | 0 | 0 |
attr |
KimiVLConfig |
1 | 0 | 0 |
attr |
AfmoeConfig |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig |
1 | 0 | 0 |
attr |
NemotronConfig |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig |
1 | 0 | 0 |
attr |
UltravoxConfig |
1 | 0 | 0 |
attr |
Step3VisionEncoderConfig |
1 | 0 | 0 |
attr |
Qwen3NextConfig |
1 | 0 | 0 |
attr |
OpsColQwen3Config |
1 | 0 | 0 |
attr |
Lfm2MoeConfig |
1 | 0 | 0 |
attr |
NemotronHConfig |
1 | 0 | 0 |
attr |
RadioConfig |
1 | 0 | 0 |
attr |
MoonViTConfig |
1 | 0 | 0 |
attr |
KimiLinearConfig |
1 | 0 | 0 |
attr |
Qwen3_5MoeConfig |
1 | 0 | 0 |
attr |
MedusaConfig |
1 | 0 | 0 |
attr |
MiDashengLMConfig |
1 | 0 | 0 |
attr |
Step3p5Config |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig |
1 | 0 | 0 |
attr |
KimiK25Config |
1 | 0 | 0 |
attr |
Qwen3ASRConfig |
1 | 0 | 0 |
attr |
HunYuanVLConfig |
1 | 0 | 0 |
attr |
OvisConfig |
1 | 0 | 0 |
attr |
FlexOlmoConfig |
1 | 0 | 0 |
attr |
DotsOCRConfig |
1 | 0 | 0 |
attr |
Step3TextConfig |
1 | 0 | 0 |
vllm.transformers_utils.configs.afmoe (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AfmoeConfig.init |
34 | 32 | 0 |
attr |
AfmoeConfig.vocab_size |
1 | 0 | 0 |
attr |
AfmoeConfig.hidden_size |
1 | 0 | 0 |
attr |
AfmoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
AfmoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
AfmoeConfig.num_dense_layers |
1 | 0 | 0 |
attr |
AfmoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
AfmoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
AfmoeConfig.head_dim |
1 | 0 | 0 |
attr |
AfmoeConfig.hidden_act |
1 | 0 | 0 |
attr |
AfmoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
AfmoeConfig.initializer_range |
1 | 0 | 0 |
attr |
AfmoeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
AfmoeConfig.use_cache |
1 | 0 | 0 |
attr |
AfmoeConfig.rope_parameters |
1 | 0 | 0 |
attr |
AfmoeConfig.rope_scaling |
1 | 0 | 0 |
attr |
AfmoeConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
AfmoeConfig.num_experts |
1 | 0 | 0 |
attr |
AfmoeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
AfmoeConfig.num_shared_experts |
1 | 0 | 0 |
attr |
AfmoeConfig.num_expert_groups |
1 | 0 | 0 |
attr |
AfmoeConfig.num_limited_groups |
1 | 0 | 0 |
attr |
AfmoeConfig.score_func |
1 | 0 | 0 |
attr |
AfmoeConfig.route_norm |
1 | 0 | 0 |
attr |
AfmoeConfig.route_scale |
1 | 0 | 0 |
attr |
AfmoeConfig.global_attn_every_n_layers |
1 | 0 | 0 |
attr |
AfmoeConfig.sliding_window |
1 | 0 | 0 |
attr |
AfmoeConfig.layer_types |
1 | 0 | 0 |
attr |
AfmoeConfig.attention_dropout |
1 | 0 | 0 |
attr |
AfmoeConfig.mup_enabled |
1 | 0 | 0 |
attr |
AfmoeConfig.n_group |
1 | 0 | 0 |
attr |
AfmoeConfig.topk_group |
1 | 0 | 0 |
vllm.transformers_utils.configs.arctic (57 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ArcticConfig.init |
31 | 1 | 0 |
meth |
ArcticConfig.from_dict |
3 | 2 | 0 |
attr |
ArcticConfig.vocab_size |
1 | 0 | 0 |
attr |
ArcticConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ArcticConfig.hidden_size |
1 | 0 | 0 |
attr |
ArcticConfig.intermediate_size |
1 | 0 | 0 |
attr |
ArcticConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ArcticConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ArcticConfig.sliding_window |
1 | 0 | 0 |
attr |
ArcticConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
ArcticConfig.hidden_act |
1 | 0 | 0 |
attr |
ArcticConfig.initializer_range |
1 | 0 | 0 |
attr |
ArcticConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
ArcticConfig.use_cache |
1 | 0 | 0 |
attr |
ArcticConfig.rope_parameters |
1 | 0 | 0 |
attr |
ArcticConfig.attention_dropout |
1 | 0 | 0 |
attr |
ArcticConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
ArcticConfig.num_local_experts |
1 | 0 | 0 |
attr |
ArcticConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
ArcticConfig.moe_layer_frequency |
1 | 0 | 0 |
attr |
ArcticConfig.moe_train_capacity_factor |
1 | 0 | 0 |
attr |
ArcticConfig.moe_eval_capacity_factor |
1 | 0 | 0 |
attr |
ArcticConfig.enable_expert_tensor_parallelism |
1 | 0 | 0 |
attr |
ArcticConfig.moe_min_capacity |
1 | 0 | 0 |
attr |
ArcticConfig.moe_token_dropping |
1 | 0 | 0 |
attr |
ArcticConfig.parallel_attn_mlp_res |
1 | 0 | 0 |
attr |
ArcticConfig.quantization |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.transformers_utils.configs.bagel (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BagelConfig.init |
13 | 11 | 0 |
attr |
BagelConfig.visual_gen |
1 | 0 | 0 |
attr |
BagelConfig.visual_und |
1 | 0 | 0 |
attr |
BagelConfig.vae_config |
1 | 0 | 0 |
attr |
BagelConfig.latent_patch_size |
1 | 0 | 0 |
attr |
BagelConfig.max_latent_size |
1 | 0 | 0 |
attr |
BagelConfig.vit_max_num_patch_per_side |
1 | 0 | 0 |
attr |
BagelConfig.connector_act |
1 | 0 | 0 |
attr |
BagelConfig.interpolate_pos |
1 | 0 | 0 |
attr |
BagelConfig.timestep_shift |
1 | 0 | 0 |
attr |
BagelConfig.llm_config |
1 | 0 | 0 |
attr |
BagelConfig.vit_config |
1 | 0 | 0 |
vllm.transformers_utils.configs.chatglm (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChatGLMConfig.init |
27 | 0 | 0 |
attr |
ChatGLMConfig.num_layers |
1 | 0 | 0 |
attr |
ChatGLMConfig.vocab_size |
1 | 0 | 0 |
attr |
ChatGLMConfig.padded_vocab_size |
1 | 0 | 0 |
attr |
ChatGLMConfig.hidden_size |
1 | 0 | 0 |
attr |
ChatGLMConfig.ffn_hidden_size |
1 | 0 | 0 |
attr |
ChatGLMConfig.kv_channels |
1 | 0 | 0 |
attr |
ChatGLMConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ChatGLMConfig.seq_length |
1 | 0 | 0 |
attr |
ChatGLMConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ChatGLMConfig.hidden_dropout |
1 | 0 | 0 |
attr |
ChatGLMConfig.attention_dropout |
1 | 0 | 0 |
attr |
ChatGLMConfig.layernorm_epsilon |
1 | 0 | 0 |
attr |
ChatGLMConfig.rmsnorm |
1 | 0 | 0 |
attr |
ChatGLMConfig.apply_residual_connection_post_layernorm |
1 | 0 | 0 |
attr |
ChatGLMConfig.post_layer_norm |
1 | 0 | 0 |
attr |
ChatGLMConfig.add_bias_linear |
1 | 0 | 0 |
attr |
ChatGLMConfig.add_qkv_bias |
1 | 0 | 0 |
attr |
ChatGLMConfig.bias_dropout_fusion |
1 | 0 | 0 |
attr |
ChatGLMConfig.multi_query_attention |
1 | 0 | 0 |
attr |
ChatGLMConfig.multi_query_group_num |
1 | 0 | 0 |
attr |
ChatGLMConfig.apply_query_key_layer_scaling |
1 | 0 | 0 |
attr |
ChatGLMConfig.attention_softmax_in_fp32 |
1 | 0 | 0 |
attr |
ChatGLMConfig.fp32_residual_connection |
1 | 0 | 0 |
attr |
ChatGLMConfig.quantization_bit |
1 | 0 | 0 |
attr |
ChatGLMConfig.pre_seq_len |
1 | 0 | 0 |
attr |
ChatGLMConfig.prefix_projection |
1 | 0 | 0 |
attr |
ChatGLMConfig.interleaved_qkv |
1 | 0 | 0 |
vllm.transformers_utils.configs.colmodernvbert (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColModernVBertConfig.init |
4 | 2 | 0 |
meth |
ColModernVBertConfig.get_text_config |
2 | 0 | 0 |
attr |
ColModernVBertConfig.embedding_dim |
1 | 0 | 0 |
attr |
ColModernVBertConfig.image_token_id |
1 | 0 | 0 |
attr |
ColModernVBertConfig.pixel_shuffle_factor |
1 | 0 | 0 |
attr |
ColModernVBertConfig.hidden_size |
1 | 0 | 0 |
attr |
ColModernVBertConfig.text_config |
1 | 0 | 0 |
attr |
ColModernVBertConfig.vision_config |
1 | 0 | 0 |
vllm.transformers_utils.configs.colqwen3 (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColQwen3Config.init |
8 | 6 | 0 |
attr |
ColQwen3Config.embed_dim |
1 | 0 | 0 |
attr |
ColQwen3Config.dims |
1 | 0 | 0 |
attr |
ColQwen3Config.dim |
1 | 0 | 0 |
attr |
ColQwen3Config.projection_dim |
1 | 0 | 0 |
attr |
ColQwen3Config.colbert_dim |
1 | 0 | 0 |
attr |
ColQwen3Config.pooling |
1 | 0 | 0 |
vllm.transformers_utils.configs.deepseek_vl2 (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MlpProjectorConfig.init |
8 | 6 | 0 |
meth |
DeepseekVLV2Config.init |
5 | 3 | 0 |
attr |
DeepseekVLV2Config.model_type |
1 | 0 | 0 |
attr |
DeepseekVLV2Config.text_config |
1 | 0 | 0 |
attr |
DeepseekVLV2Config.vocab_size |
1 | 0 | 0 |
meth |
VisionEncoderConfig.init |
14 | 12 | 0 |
vllm.transformers_utils.configs.dotsocr (35 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DotsOCRConfig.init |
6 | 1 | 0 |
meth |
DotsOCRConfig.save_pretrained |
3 | 0 | 0 |
attr |
DotsOCRConfig.image_token_id |
1 | 0 | 0 |
attr |
DotsOCRConfig.video_token_id |
1 | 0 | 0 |
attr |
DotsOCRConfig.vision_config |
1 | 0 | 0 |
meth |
DotsVisionConfig.init |
19 | 12 | 1 |
attr |
DotsVisionConfig.embed_dim |
1 | 0 | 0 |
attr |
DotsVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
DotsVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
DotsVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
DotsVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
DotsVisionConfig.num_channels |
1 | 0 | 0 |
attr |
DotsVisionConfig.patch_size |
1 | 0 | 0 |
attr |
DotsVisionConfig.spatial_merge_size |
1 | 0 | 0 |
attr |
DotsVisionConfig.temporal_patch_size |
1 | 0 | 0 |
attr |
DotsVisionConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
DotsVisionConfig.use_bias |
1 | 0 | 0 |
attr |
DotsVisionConfig.attn_implementation |
1 | 0 | 0 |
attr |
DotsVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
DotsVisionConfig.init_merger_std |
1 | 0 | 0 |
attr |
DotsVisionConfig.is_causal |
1 | 0 | 0 |
attr |
DotsVisionConfig.post_norm |
1 | 0 | 0 |
attr |
DotsVisionConfig.gradient_checkpointing |
1 | 0 | 0 |
vllm.transformers_utils.configs.eagle (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EAGLEConfig.init |
5 | 3 | 0 |
meth |
EAGLEConfig.from_pretrained |
3 | 2 | 0 |
attr |
EAGLEConfig.model |
1 | 0 | 0 |
attr |
EAGLEConfig.truncated_vocab_size |
1 | 0 | 0 |
vllm.transformers_utils.configs.extract_hidden_states (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ExtractHiddenStatesConfig.init |
4 | 2 | 0 |
meth |
ExtractHiddenStatesConfig.from_pretrained |
3 | 2 | 0 |
vllm.transformers_utils.configs.falcon (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RWConfig.init |
19 | 1 | 0 |
prop |
RWConfig.head_dim |
1 | 0 | 0 |
prop |
RWConfig.rotary |
1 | 0 | 0 |
attr |
RWConfig.vocab_size |
1 | 0 | 0 |
attr |
RWConfig.hidden_size |
1 | 0 | 0 |
attr |
RWConfig.n_layer |
1 | 0 | 0 |
attr |
RWConfig.n_head |
1 | 0 | 0 |
attr |
RWConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
RWConfig.initializer_range |
1 | 0 | 0 |
attr |
RWConfig.use_cache |
1 | 0 | 0 |
attr |
RWConfig.hidden_dropout |
1 | 0 | 0 |
attr |
RWConfig.attention_dropout |
1 | 0 | 0 |
attr |
RWConfig.bos_token_id |
1 | 0 | 0 |
attr |
RWConfig.eos_token_id |
1 | 0 | 0 |
attr |
RWConfig.multi_query |
1 | 0 | 0 |
attr |
RWConfig.n_head_kv |
1 | 0 | 0 |
attr |
RWConfig.alibi |
1 | 0 | 0 |
attr |
RWConfig.bias |
1 | 0 | 0 |
attr |
RWConfig.parallel_attn |
1 | 0 | 0 |
attr |
RWConfig.new_decoder_architecture |
1 | 0 | 0 |
vllm.transformers_utils.configs.flex_olmo (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlexOlmoConfig.init |
25 | 1 | 0 |
attr |
FlexOlmoConfig.vocab_size |
1 | 0 | 0 |
attr |
FlexOlmoConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
FlexOlmoConfig.hidden_size |
1 | 0 | 0 |
attr |
FlexOlmoConfig.intermediate_size |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_attention_heads |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
FlexOlmoConfig.hidden_act |
1 | 0 | 0 |
attr |
FlexOlmoConfig.initializer_range |
1 | 0 | 0 |
attr |
FlexOlmoConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
FlexOlmoConfig.use_cache |
1 | 0 | 0 |
attr |
FlexOlmoConfig.rope_parameters |
1 | 0 | 0 |
attr |
FlexOlmoConfig.attention_bias |
1 | 0 | 0 |
attr |
FlexOlmoConfig.attention_dropout |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_experts |
1 | 0 | 0 |
attr |
FlexOlmoConfig.output_router_logits |
1 | 0 | 0 |
attr |
FlexOlmoConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
FlexOlmoConfig.norm_topk_prob |
1 | 0 | 0 |
vllm.transformers_utils.configs.funaudiochat (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FunAudioChatAudioEncoderConfig.init |
25 | 24 | 0 |
attr |
FunAudioChatAudioEncoderConfig.num_mel_bins |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.d_model |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.encoder_layers |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.dropout |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.activation_function |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.activation_dropout |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.initializer_range |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.scale_embedding |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.max_source_positions |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.n_window |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.output_dim |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.bos_token_id |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.codebook_size |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.continuous_features_mode |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.crq_transformer_config |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.eos_token_id |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.group_size |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.enable_audio_invert_tower |
1 | 0 | 0 |
attr |
FunAudioChatAudioEncoderConfig.pad_token_id |
1 | 0 | 0 |
meth |
FunAudioChatConfig.init |
7 | 6 | 0 |
attr |
FunAudioChatConfig.audio_token_index |
1 | 0 | 0 |
attr |
FunAudioChatConfig.ignore_index |
1 | 0 | 0 |
attr |
FunAudioChatConfig.audio_config |
1 | 0 | 0 |
attr |
FunAudioChatConfig.text_config |
1 | 0 | 0 |
attr |
FunAudioChatConfig.hidden_size |
1 | 0 | 0 |
vllm.transformers_utils.configs.hunyuan_vl (114 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HunYuanVLVisionConfig.init |
26 | 0 | 0 |
attr |
HunYuanVLVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.interpolate_mode |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.learnable_mlp_pooling_size |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.num_channels |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.out_hidden_size |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.patch_size |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.remove_prenorm |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.spatial_merge_size |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.temporal_patch_size |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.resize_resolution |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.img_max_token_num |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.max_image_size |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.min_image_size |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.video_max_image_size |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.video_min_image_size |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.anyres_vit_max_image_size |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.max_vit_seq_len |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.text_hidden_size |
1 | 0 | 0 |
attr |
HunYuanVLVisionConfig.num_key_value_heads |
1 | 0 | 0 |
meth |
HunYuanVLConfig.init |
10 | 0 | 0 |
meth |
HunYuanVLConfig.setattr |
3 | 0 | 0 |
meth |
HunYuanVLConfig.getattribute |
2 | 0 | 0 |
attr |
HunYuanVLConfig.image_token_id |
1 | 0 | 0 |
attr |
HunYuanVLConfig.im_start_id |
1 | 0 | 0 |
attr |
HunYuanVLConfig.im_end_id |
1 | 0 | 0 |
attr |
HunYuanVLConfig.im_newline_id |
1 | 0 | 0 |
attr |
HunYuanVLConfig.video_start_id |
1 | 0 | 0 |
attr |
HunYuanVLConfig.video_end_id |
1 | 0 | 0 |
attr |
HunYuanVLConfig.vision_config |
1 | 0 | 0 |
attr |
HunYuanVLConfig.text_config |
1 | 0 | 0 |
meth |
HunYuanVLTextConfig.init |
24 | 1 | 0 |
meth |
HunYuanVLTextConfig._rope_scaling_validation |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.vocab_size |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.hidden_size |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.head_dim |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.hidden_act |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.initializer_range |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.pretraining_tp |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.use_cache |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.rope_theta |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.rope_scaling |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.attention_bias |
1 | 0 | 0 |
attr |
HunYuanVLTextConfig.attention_dropout |
1 | 0 | 0 |
vllm.transformers_utils.configs.isaac (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PixelShuffleSiglip2VisionConfig.init |
4 | 2 | 0 |
attr |
PixelShuffleSiglip2VisionConfig.pixel_shuffle_scale_factor |
1 | 0 | 0 |
attr |
PixelShuffleSiglip2VisionConfig.num_patches |
1 | 0 | 0 |
meth |
IsaacConfig.init |
11 | 7 | 0 |
attr |
IsaacConfig.video_patch_size |
1 | 0 | 0 |
attr |
IsaacConfig.vision_max_num_patches |
1 | 0 | 0 |
attr |
IsaacConfig.vision_min_num_patches |
1 | 0 | 0 |
attr |
IsaacConfig.pixel_shuffle_scale |
1 | 0 | 0 |
attr |
IsaacConfig.max_sequence_length |
1 | 0 | 0 |
attr |
IsaacConfig.vision_token |
1 | 0 | 0 |
attr |
IsaacConfig.vision_attn_implementation |
1 | 0 | 0 |
attr |
IsaacConfig.text_config |
1 | 0 | 0 |
attr |
IsaacConfig.vision_config |
1 | 0 | 0 |
vllm.transformers_utils.configs.jais (53 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JAISConfig.__init__ |
27 | 0 | 0 |
meth |
JAISConfig._alibi_scaling_validation |
1 | 0 | 0 |
attr |
JAISConfig.vocab_size |
1 | 0 | 0 |
attr |
JAISConfig.n_positions |
1 | 0 | 0 |
attr |
JAISConfig.n_embd |
1 | 0 | 0 |
attr |
JAISConfig.n_layer |
1 | 0 | 0 |
attr |
JAISConfig.n_head |
1 | 0 | 0 |
attr |
JAISConfig.n_inner |
1 | 0 | 0 |
attr |
JAISConfig.activation_function |
1 | 0 | 0 |
attr |
JAISConfig.resid_pdrop |
1 | 0 | 0 |
attr |
JAISConfig.embd_pdrop |
1 | 0 | 0 |
attr |
JAISConfig.attn_pdrop |
1 | 0 | 0 |
attr |
JAISConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
JAISConfig.initializer_range |
1 | 0 | 0 |
attr |
JAISConfig.scale_attn_weights |
1 | 0 | 0 |
attr |
JAISConfig.use_cache |
1 | 0 | 0 |
attr |
JAISConfig.scale_attn_by_inverse_layer_idx |
1 | 0 | 0 |
attr |
JAISConfig.reorder_and_upcast_attn |
1 | 0 | 0 |
attr |
JAISConfig.bos_token_id |
1 | 0 | 0 |
attr |
JAISConfig.eos_token_id |
1 | 0 | 0 |
attr |
JAISConfig.position_embedding_type |
1 | 0 | 0 |
attr |
JAISConfig.mup_width_scale |
1 | 0 | 0 |
attr |
JAISConfig.mup_embeddings_scale |
1 | 0 | 0 |
attr |
JAISConfig.mup_output_alpha |
1 | 0 | 0 |
attr |
JAISConfig.mup_scale_qk_dot_by_d |
1 | 0 | 0 |
attr |
JAISConfig.alibi_scaling |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.transformers_utils.configs.kimi_k25 (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KimiK25VisionConfig.__init__ |
18 | 16 | 0 |
attr |
KimiK25VisionConfig.patch_size |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.init_pos_emb_height |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.init_pos_emb_width |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.init_pos_emb_time |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.pos_emb_type |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.hidden_size |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.merge_kernel_size |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.video_attn_type |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.merge_type |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.mm_projector_type |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.projector_hidden_act |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.projector_ln_eps |
1 | 0 | 0 |
attr |
KimiK25VisionConfig.mm_hidden_size |
1 | 0 | 0 |
meth |
KimiK25Config.__init__ |
9 | 7 | 0 |
attr |
KimiK25Config.ignore_index |
1 | 0 | 0 |
attr |
KimiK25Config.media_placeholder_token_id |
1 | 0 | 0 |
attr |
KimiK25Config.use_unified_vision_chunk |
1 | 0 | 0 |
attr |
KimiK25Config.video_placeholder |
1 | 0 | 0 |
attr |
KimiK25Config.quantization_config |
1 | 0 | 0 |
vllm.transformers_utils.configs.kimi_linear (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KimiLinearConfig.__init__ |
39 | 20 | 0 |
meth |
KimiLinearConfig.is_kda_layer |
2 | 1 | 0 |
prop |
KimiLinearConfig.is_mla |
1 | 0 | 0 |
prop |
KimiLinearConfig.is_moe |
1 | 0 | 0 |
attr |
KimiLinearConfig.model_type |
1 | 0 | 0 |
attr |
KimiLinearConfig.vocab_size |
1 | 0 | 0 |
attr |
KimiLinearConfig.hidden_size |
1 | 0 | 0 |
attr |
KimiLinearConfig.head_dim |
1 | 0 | 0 |
attr |
KimiLinearConfig.intermediate_size |
1 | 0 | 0 |
attr |
KimiLinearConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
KimiLinearConfig.num_attention_heads |
1 | 0 | 0 |
attr |
KimiLinearConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
KimiLinearConfig.hidden_act |
1 | 0 | 0 |
attr |
KimiLinearConfig.initializer_range |
1 | 0 | 0 |
attr |
KimiLinearConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
KimiLinearConfig.use_cache |
1 | 0 | 0 |
attr |
KimiLinearConfig.rope_parameters |
1 | 0 | 0 |
attr |
KimiLinearConfig.q_lora_rank |
1 | 0 | 0 |
attr |
KimiLinearConfig.kv_lora_rank |
1 | 0 | 0 |
attr |
KimiLinearConfig.qk_nope_head_dim |
1 | 0 | 0 |
attr |
KimiLinearConfig.qk_rope_head_dim |
1 | 0 | 0 |
attr |
KimiLinearConfig.v_head_dim |
1 | 0 | 0 |
attr |
KimiLinearConfig.mla_use_nope |
1 | 0 | 0 |
attr |
KimiLinearConfig.num_experts |
1 | 0 | 0 |
attr |
KimiLinearConfig.num_experts_per_token |
1 | 0 | 0 |
attr |
KimiLinearConfig.moe_renormalize |
1 | 0 | 0 |
attr |
KimiLinearConfig.num_shared_experts |
1 | 0 | 0 |
attr |
KimiLinearConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
KimiLinearConfig.moe_router_activation_func |
1 | 0 | 0 |
attr |
KimiLinearConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
KimiLinearConfig.first_k_dense_replace |
1 | 0 | 0 |
attr |
KimiLinearConfig.moe_layer_freq |
1 | 0 | 0 |
attr |
KimiLinearConfig.use_grouped_topk |
1 | 0 | 0 |
attr |
KimiLinearConfig.num_expert_group |
1 | 0 | 0 |
attr |
KimiLinearConfig.topk_group |
1 | 0 | 0 |
attr |
KimiLinearConfig.num_nextn_predict_layers |
1 | 0 | 0 |
attr |
KimiLinearConfig.linear_attn_config |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.transformers_utils.configs.kimi_vl (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KimiVLConfig.__init__ |
7 | 5 | 0 |
attr |
KimiVLConfig.vision_config |
1 | 0 | 0 |
attr |
KimiVLConfig.text_config |
1 | 0 | 0 |
attr |
KimiVLConfig.ignore_index |
1 | 0 | 0 |
attr |
KimiVLConfig.media_placeholder_token_id |
1 | 0 | 0 |
vllm.transformers_utils.configs.lfm2_moe (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2MoeConfig.__init__ |
26 | 24 | 0 |
attr |
Lfm2MoeConfig.vocab_size |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.hidden_size |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.rope_parameters |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.use_cache |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.norm_eps |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.conv_bias |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.conv_L_cache |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.num_dense_layers |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.num_experts |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.use_expert_bias |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.layer_types |
1 | 0 | 0 |
vllm.transformers_utils.configs.medusa (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MedusaConfig.__init__ |
9 | 7 | 0 |
meth |
MedusaConfig.from_pretrained |
3 | 2 | 0 |
prop |
MedusaConfig.num_attention_heads |
1 | 0 | 0 |
prop |
MedusaConfig.num_lookahead_tokens |
2 | 1 | 0 |
attr |
MedusaConfig.hidden_size |
1 | 0 | 0 |
attr |
MedusaConfig.vocab_size |
1 | 0 | 0 |
attr |
MedusaConfig.num_heads |
1 | 0 | 0 |
attr |
MedusaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MedusaConfig.max_paths |
1 | 0 | 0 |
attr |
MedusaConfig.topk |
1 | 0 | 0 |
attr |
MedusaConfig.max_seq_len |
1 | 0 | 0 |
attr |
MedusaConfig.truncated_vocab_size |
1 | 0 | 0 |
vllm.transformers_utils.configs.midashenglm (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DashengConfig.__init__ |
23 | 21 | 0 |
attr |
DashengConfig.embed_dim |
1 | 0 | 0 |
attr |
DashengConfig.outputdim |
1 | 0 | 0 |
attr |
DashengConfig.patch_size |
1 | 0 | 0 |
attr |
DashengConfig.patch_stride |
1 | 0 | 0 |
attr |
DashengConfig.input_channels |
1 | 0 | 0 |
attr |
DashengConfig.target_length |
1 | 0 | 0 |
attr |
DashengConfig.depth |
1 | 0 | 0 |
attr |
DashengConfig.num_heads |
1 | 0 | 0 |
attr |
DashengConfig.mlp_ratio |
1 | 0 | 0 |
attr |
DashengConfig.qkv_bias |
1 | 0 | 0 |
attr |
DashengConfig.init_values |
1 | 0 | 0 |
attr |
DashengConfig.drop_rate |
1 | 0 | 0 |
attr |
DashengConfig.attn_drop_rate |
1 | 0 | 0 |
attr |
DashengConfig.f_min |
1 | 0 | 0 |
attr |
DashengConfig.f_max |
1 | 0 | 0 |
attr |
DashengConfig.center |
1 | 0 | 0 |
attr |
DashengConfig.win_length |
1 | 0 | 0 |
attr |
DashengConfig.hop_length |
1 | 0 | 0 |
attr |
DashengConfig.sample_rate |
1 | 0 | 0 |
attr |
DashengConfig.n_fft |
1 | 0 | 0 |
attr |
DashengConfig.n_mels |
1 | 0 | 0 |
meth |
MiDashengLMConfig.__init__ |
6 | 4 | 0 |
attr |
MiDashengLMConfig.audio_encoder_config |
1 | 0 | 0 |
attr |
MiDashengLMConfig.subsample_factor |
1 | 0 | 0 |
attr |
MiDashengLMConfig.text_config |
1 | 0 | 0 |
attr |
MiDashengLMConfig.audio_token_id |
1 | 0 | 0 |
vllm.transformers_utils.configs.mistral (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.transformers_utils.configs.mlp_speculator (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MLPSpeculatorConfig.__init__ |
10 | 8 | 0 |
attr |
MLPSpeculatorConfig.vocab_size |
1 | 0 | 0 |
attr |
MLPSpeculatorConfig.emb_dim |
1 | 0 | 0 |
attr |
MLPSpeculatorConfig.inner_dim |
1 | 0 | 0 |
attr |
MLPSpeculatorConfig.n_predict |
1 | 0 | 0 |
attr |
MLPSpeculatorConfig.top_k_tokens_per_head |
1 | 0 | 0 |
attr |
MLPSpeculatorConfig.n_candidates |
1 | 0 | 0 |
attr |
MLPSpeculatorConfig.num_lookahead_tokens |
1 | 0 | 0 |
attr |
MLPSpeculatorConfig.tie_weights |
1 | 0 | 0 |
attr |
MLPSpeculatorConfig.scale_input |
1 | 0 | 0 |
vllm.transformers_utils.configs.moonvit (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoonViTConfig.__init__ |
10 | 8 | 0 |
attr |
MoonViTConfig.patch_size |
1 | 0 | 0 |
attr |
MoonViTConfig.init_pos_emb_height |
1 | 0 | 0 |
attr |
MoonViTConfig.init_pos_emb_width |
1 | 0 | 0 |
attr |
MoonViTConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MoonViTConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MoonViTConfig.hidden_size |
1 | 0 | 0 |
attr |
MoonViTConfig.intermediate_size |
1 | 0 | 0 |
attr |
MoonViTConfig.merge_kernel_size |
1 | 0 | 0 |
vllm.transformers_utils.configs.nemotron (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
NemotronConfig.__init__ |
22 | 0 | 0 |
meth |
NemotronConfig._rope_parameters_validation |
1 | 0 | 0 |
attr |
NemotronConfig.vocab_size |
1 | 0 | 0 |
attr |
NemotronConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
NemotronConfig.hidden_size |
1 | 0 | 0 |
attr |
NemotronConfig.intermediate_size |
1 | 0 | 0 |
attr |
NemotronConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
NemotronConfig.num_attention_heads |
1 | 0 | 0 |
attr |
NemotronConfig.head_dim |
1 | 0 | 0 |
attr |
NemotronConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
NemotronConfig.hidden_act |
1 | 0 | 0 |
attr |
NemotronConfig.initializer_range |
1 | 0 | 0 |
attr |
NemotronConfig.norm_eps |
1 | 0 | 0 |
attr |
NemotronConfig.use_cache |
1 | 0 | 0 |
attr |
NemotronConfig.rope_parameters |
1 | 0 | 0 |
attr |
NemotronConfig.attention_bias |
1 | 0 | 0 |
attr |
NemotronConfig.attention_dropout |
1 | 0 | 0 |
attr |
NemotronConfig.mlp_bias |
1 | 0 | 0 |
vllm.transformers_utils.configs.nemotron_h (105 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NemotronHConfig.__init__ |
54 | 0 | 0 |
prop |
NemotronHConfig.layers_block_type |
1 | 0 | 0 |
attr |
NemotronHConfig.vocab_size |
1 | 0 | 0 |
attr |
NemotronHConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
NemotronHConfig.hidden_size |
1 | 0 | 0 |
attr |
NemotronHConfig.intermediate_size |
1 | 0 | 0 |
attr |
NemotronHConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
NemotronHConfig.hybrid_override_pattern |
1 | 0 | 0 |
attr |
NemotronHConfig.mtp_hybrid_override_pattern |
1 | 0 | 0 |
attr |
NemotronHConfig.num_attention_heads |
1 | 0 | 0 |
attr |
NemotronHConfig.head_dim |
1 | 0 | 0 |
attr |
NemotronHConfig.sliding_window |
1 | 0 | 0 |
attr |
NemotronHConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
NemotronHConfig.attention_dropout |
1 | 0 | 0 |
attr |
NemotronHConfig.hidden_dropout |
1 | 0 | 0 |
attr |
NemotronHConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
NemotronHConfig.mlp_hidden_act |
1 | 0 | 0 |
attr |
NemotronHConfig.attention_bias |
1 | 0 | 0 |
attr |
NemotronHConfig.mlp_bias |
1 | 0 | 0 |
attr |
NemotronHConfig.use_bias |
1 | 0 | 0 |
attr |
NemotronHConfig.initializer_range |
1 | 0 | 0 |
attr |
NemotronHConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
NemotronHConfig.residual_in_fp32 |
1 | 0 | 0 |
attr |
NemotronHConfig.use_cache |
1 | 0 | 0 |
attr |
NemotronHConfig.num_logits_to_keep |
1 | 0 | 0 |
attr |
NemotronHConfig.use_mamba_kernels |
1 | 0 | 0 |
attr |
NemotronHConfig.n_groups |
1 | 0 | 0 |
attr |
NemotronHConfig.mamba_head_dim |
1 | 0 | 0 |
attr |
NemotronHConfig.ssm_state_size |
1 | 0 | 0 |
attr |
NemotronHConfig.mamba_num_heads |
1 | 0 | 0 |
attr |
NemotronHConfig.conv_kernel |
1 | 0 | 0 |
attr |
NemotronHConfig.expand |
1 | 0 | 0 |
attr |
NemotronHConfig.mamba_hidden_act |
1 | 0 | 0 |
attr |
NemotronHConfig.time_step_min |
1 | 0 | 0 |
attr |
NemotronHConfig.time_step_max |
1 | 0 | 0 |
attr |
NemotronHConfig.time_step_limit |
1 | 0 | 0 |
attr |
NemotronHConfig.time_step_floor |
1 | 0 | 0 |
attr |
NemotronHConfig.use_conv_bias |
1 | 0 | 0 |
attr |
NemotronHConfig.mamba_proj_bias |
1 | 0 | 0 |
attr |
NemotronHConfig.chunk_size |
1 | 0 | 0 |
attr |
NemotronHConfig.rescale_prenorm_residual |
1 | 0 | 0 |
attr |
NemotronHConfig.n_routed_experts |
1 | 0 | 0 |
attr |
NemotronHConfig.n_shared_experts |
1 | 0 | 0 |
attr |
NemotronHConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
NemotronHConfig.moe_shared_expert_intermediate_size |
1 | 0 | 0 |
attr |
NemotronHConfig.moe_latent_size |
1 | 0 | 0 |
attr |
NemotronHConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
NemotronHConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
NemotronHConfig.n_group |
1 | 0 | 0 |
attr |
NemotronHConfig.topk_group |
1 | 0 | 0 |
attr |
NemotronHConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.transformers_utils.configs.olmo3 (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Olmo3Config.__init__ |
22 | 0 | 0 |
attr |
Olmo3Config.vocab_size |
1 | 0 | 0 |
attr |
Olmo3Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Olmo3Config.hidden_size |
1 | 0 | 0 |
attr |
Olmo3Config.intermediate_size |
1 | 0 | 0 |
attr |
Olmo3Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Olmo3Config.num_attention_heads |
1 | 0 | 0 |
attr |
Olmo3Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Olmo3Config.hidden_act |
1 | 0 | 0 |
attr |
Olmo3Config.initializer_range |
1 | 0 | 0 |
attr |
Olmo3Config.use_cache |
1 | 0 | 0 |
attr |
Olmo3Config.rope_parameters |
1 | 0 | 0 |
attr |
Olmo3Config.attention_bias |
1 | 0 | 0 |
attr |
Olmo3Config.attention_dropout |
1 | 0 | 0 |
attr |
Olmo3Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Olmo3Config.sliding_window |
1 | 0 | 0 |
attr |
Olmo3Config.layer_types |
1 | 0 | 0 |
vllm.transformers_utils.configs.ovis (48 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SiglipVisualTokenizerConfig.__init__ |
2 | 0 | 0 |
attr |
SiglipVisualTokenizerConfig.drop_cls_token |
1 | 0 | 0 |
meth |
AIMv2Config.__init__ |
14 | 13 | 1 |
attr |
AIMv2Config.hidden_size |
1 | 0 | 0 |
attr |
AIMv2Config.intermediate_size |
1 | 0 | 0 |
attr |
AIMv2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
AIMv2Config.num_attention_heads |
1 | 0 | 0 |
attr |
AIMv2Config.num_channels |
1 | 0 | 0 |
attr |
AIMv2Config.patch_size |
1 | 0 | 0 |
attr |
AIMv2Config.image_size |
1 | 0 | 0 |
attr |
AIMv2Config.attention_dropout |
1 | 0 | 0 |
attr |
AIMv2Config.rms_norm_eps |
1 | 0 | 0 |
attr |
AIMv2Config.projection_dropout |
1 | 0 | 0 |
attr |
AIMv2Config.qkv_bias |
1 | 0 | 0 |
attr |
AIMv2Config.use_bias |
1 | 0 | 0 |
meth |
BaseVisualTokenizerConfig.__init__ |
9 | 2 | 0 |
attr |
BaseVisualTokenizerConfig.vocab_size |
1 | 0 | 0 |
attr |
BaseVisualTokenizerConfig.tokenize_function |
1 | 0 | 0 |
attr |
BaseVisualTokenizerConfig.tau |
1 | 0 | 0 |
attr |
BaseVisualTokenizerConfig.depths |
1 | 0 | 0 |
attr |
BaseVisualTokenizerConfig.backbone_kwargs |
1 | 0 | 0 |
attr |
BaseVisualTokenizerConfig.drop_cls_token |
1 | 0 | 0 |
attr |
BaseVisualTokenizerConfig.backbone_config |
1 | 0 | 0 |
attr |
BaseVisualTokenizerConfig.hidden_stride |
1 | 0 | 0 |
meth |
Aimv2VisualTokenizerConfig.__init__ |
2 | 0 | 0 |
attr |
Aimv2VisualTokenizerConfig.drop_cls_token |
1 | 0 | 0 |
meth |
OvisConfig.__init__ |
9 | 2 | 0 |
attr |
OvisConfig.text_config |
1 | 0 | 0 |
attr |
OvisConfig.visual_tokenizer_config |
1 | 0 | 0 |
attr |
OvisConfig.multimodal_max_length |
1 | 0 | 0 |
attr |
OvisConfig.hidden_size |
1 | 0 | 0 |
attr |
OvisConfig.conversation_formatter_class |
1 | 0 | 0 |
attr |
OvisConfig.llm_attn_implementation |
1 | 0 | 0 |
attr |
OvisConfig.disable_tie_weight |
1 | 0 | 0 |
vllm.transformers_utils.configs.qwen3_5 (68 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3_5TextConfig.__init__ |
27 | 0 | 0 |
attr |
Qwen3_5TextConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.attention_bias |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.head_dim |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.layer_types |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.linear_conv_kernel_dim |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.linear_key_head_dim |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.linear_value_head_dim |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.linear_num_key_heads |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.linear_num_value_heads |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
Qwen3_5Config.__init__ |
9 | 0 | 0 |
attr |
Qwen3_5Config.image_token_id |
1 | 0 | 0 |
attr |
Qwen3_5Config.video_token_id |
1 | 0 | 0 |
attr |
Qwen3_5Config.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen3_5Config.vision_end_token_id |
1 | 0 | 0 |
attr |
Qwen3_5Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen3_5Config.vision_config |
1 | 0 | 0 |
attr |
Qwen3_5Config.text_config |
1 | 0 | 0 |
vllm.transformers_utils.configs.qwen3_5_moe (78 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3_5MoeTextConfig.__init__ |
32 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.attention_bias |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.head_dim |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.layer_types |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.linear_conv_kernel_dim |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.linear_key_head_dim |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.linear_value_head_dim |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.linear_num_key_heads |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.linear_num_value_heads |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.shared_expert_intermediate_size |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.num_experts |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.output_router_logits |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
Qwen3_5MoeConfig.__init__ |
9 | 0 | 0 |
attr |
Qwen3_5MoeConfig.image_token_id |
1 | 0 | 0 |
attr |
Qwen3_5MoeConfig.video_token_id |
1 | 0 | 0 |
attr |
Qwen3_5MoeConfig.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen3_5MoeConfig.vision_end_token_id |
1 | 0 | 0 |
attr |
Qwen3_5MoeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen3_5MoeConfig.vision_config |
1 | 0 | 0 |
attr |
Qwen3_5MoeConfig.text_config |
1 | 0 | 0 |
vllm.transformers_utils.configs.qwen3_asr (58 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3ASRAudioEncoderConfig.__init__ |
19 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.num_mel_bins |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.d_model |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.encoder_layers |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.dropout |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.activation_function |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.activation_dropout |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.scale_embedding |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.max_source_positions |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.n_window |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.output_dim |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.n_window_infer |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.conv_chunksize |
1 | 0 | 0 |
attr |
Qwen3ASRAudioEncoderConfig.downsample_hidden_size |
1 | 0 | 0 |
meth |
Qwen3ASRConfig.__init__ |
4 | 0 | 0 |
meth |
Qwen3ASRConfig.get_text_config |
2 | 1 | 0 |
attr |
Qwen3ASRConfig.thinker_config |
1 | 0 | 0 |
attr |
Qwen3ASRConfig.support_languages |
1 | 0 | 0 |
meth |
Qwen3ASRThinkerConfig.__init__ |
8 | 0 | 0 |
attr |
Qwen3ASRThinkerConfig.user_token_id |
1 | 0 | 0 |
attr |
Qwen3ASRThinkerConfig.audio_start_token_id |
1 | 0 | 0 |
attr |
Qwen3ASRThinkerConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3ASRThinkerConfig.audio_config |
1 | 0 | 0 |
attr |
Qwen3ASRThinkerConfig.text_config |
1 | 0 | 0 |
attr |
Qwen3ASRThinkerConfig.audio_token_id |
1 | 0 | 0 |
vllm.transformers_utils.configs.qwen3_next (64 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3NextConfig.__init__ |
33 | 0 | 0 |
attr |
Qwen3NextConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen3NextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen3NextConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen3NextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Qwen3NextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen3NextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen3NextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen3NextConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen3NextConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3NextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen3NextConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen3NextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Qwen3NextConfig.partial_rotary_factor |
1 | 0 | 0 |
attr |
Qwen3NextConfig.attention_bias |
1 | 0 | 0 |
attr |
Qwen3NextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen3NextConfig.head_dim |
1 | 0 | 0 |
attr |
Qwen3NextConfig.layer_types |
1 | 0 | 0 |
attr |
Qwen3NextConfig.linear_conv_kernel_dim |
1 | 0 | 0 |
attr |
Qwen3NextConfig.linear_key_head_dim |
1 | 0 | 0 |
attr |
Qwen3NextConfig.linear_value_head_dim |
1 | 0 | 0 |
attr |
Qwen3NextConfig.linear_num_key_heads |
1 | 0 | 0 |
attr |
Qwen3NextConfig.linear_num_value_heads |
1 | 0 | 0 |
attr |
Qwen3NextConfig.decoder_sparse_step |
1 | 0 | 0 |
attr |
Qwen3NextConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Qwen3NextConfig.shared_expert_intermediate_size |
1 | 0 | 0 |
attr |
Qwen3NextConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Qwen3NextConfig.num_experts |
1 | 0 | 0 |
attr |
Qwen3NextConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
Qwen3NextConfig.output_router_logits |
1 | 0 | 0 |
attr |
Qwen3NextConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Qwen3NextConfig.mlp_only_layers |
1 | 0 | 0 |
vllm.transformers_utils.configs.radio (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RadioConfig.__init__ |
17 | 15 | 0 |
attr |
RadioConfig.model_name |
1 | 0 | 0 |
attr |
RadioConfig.image_size |
1 | 0 | 0 |
attr |
RadioConfig.patch_size |
1 | 0 | 0 |
attr |
RadioConfig.qkv_bias |
1 | 0 | 0 |
attr |
RadioConfig.qk_normalization |
1 | 0 | 0 |
attr |
RadioConfig.norm_type |
1 | 0 | 0 |
attr |
RadioConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
RadioConfig.initializer_factor |
1 | 0 | 0 |
attr |
RadioConfig.hidden_act |
1 | 0 | 0 |
attr |
RadioConfig.cpe_max_size |
1 | 0 | 0 |
attr |
RadioConfig.norm_mean |
1 | 0 | 0 |
attr |
RadioConfig.norm_std |
1 | 0 | 0 |
attr |
RadioConfig.register_multiple |
1 | 0 | 0 |
attr |
RadioConfig.teachers |
1 | 0 | 0 |
attr |
RadioConfig.cls_token_per_teacher |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.transformers_utils.configs.speculators.algos (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
register_speculator |
2 | 0 | 0 |
vllm.transformers_utils.configs.speculators.base (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SpeculatorsConfig.from_pretrained |
3 | 2 | 0 |
vllm.transformers_utils.configs.step3_vl (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Step3VLConfig.__init__ |
7 | 6 | 0 |
attr |
Step3VLConfig.vision_config |
1 | 0 | 0 |
attr |
Step3VLConfig.text_config |
1 | 0 | 0 |
attr |
Step3VLConfig.understand_projector_stride |
1 | 0 | 0 |
attr |
Step3VLConfig.projector_bias |
1 | 0 | 0 |
attr |
Step3VLConfig.hidden_size |
1 | 0 | 0 |
attr |
Step3VLConfig.image_token_id |
1 | 0 | 0 |
meth |
Step3VisionEncoderConfig.__init__ |
12 | 0 | 0 |
attr |
Step3VisionEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
Step3VisionEncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
Step3VisionEncoderConfig.output_hidden_size |
1 | 0 | 0 |
attr |
Step3VisionEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Step3VisionEncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Step3VisionEncoderConfig.num_channels |
1 | 0 | 0 |
attr |
Step3VisionEncoderConfig.patch_size |
1 | 0 | 0 |
attr |
Step3VisionEncoderConfig.image_size |
1 | 0 | 0 |
attr |
Step3VisionEncoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Step3VisionEncoderConfig.hidden_act |
1 | 0 | 0 |
meth |
Step3TextConfig.__init__ |
20 | 19 | 0 |
attr |
Step3TextConfig.hidden_size |
1 | 0 | 0 |
attr |
Step3TextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Step3TextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Step3TextConfig.num_attention_groups |
1 | 0 | 0 |
attr |
Step3TextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Step3TextConfig.max_seq_len |
1 | 0 | 0 |
attr |
Step3TextConfig.vocab_size |
1 | 0 | 0 |
attr |
Step3TextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Step3TextConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Step3TextConfig.moe_num_experts |
1 | 0 | 0 |
attr |
Step3TextConfig.moe_top_k |
1 | 0 | 0 |
attr |
Step3TextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Step3TextConfig.max_position_embedding |
1 | 0 | 0 |
attr |
Step3TextConfig.share_expert_dim |
1 | 0 | 0 |
attr |
Step3TextConfig.share_q_dim |
1 | 0 | 0 |
attr |
Step3TextConfig.head_dim |
1 | 0 | 0 |
attr |
Step3TextConfig.norm_expert_weight |
1 | 0 | 0 |
attr |
Step3TextConfig.moe_layers_enum |
1 | 0 | 0 |
vllm.transformers_utils.configs.step3p5 (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Step3p5Config.__init__ |
37 | 35 | 0 |
attr |
Step3p5Config.hidden_size |
1 | 0 | 0 |
attr |
Step3p5Config.intermediate_size |
1 | 0 | 0 |
attr |
Step3p5Config.num_attention_heads |
1 | 0 | 0 |
attr |
Step3p5Config.num_attention_groups |
1 | 0 | 0 |
attr |
Step3p5Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Step3p5Config.max_seq_len |
1 | 0 | 0 |
attr |
Step3p5Config.vocab_size |
1 | 0 | 0 |
attr |
Step3p5Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Step3p5Config.use_moe |
1 | 0 | 0 |
attr |
Step3p5Config.moe_intermediate_size |
1 | 0 | 0 |
attr |
Step3p5Config.moe_every_n_layer |
1 | 0 | 0 |
attr |
Step3p5Config.moe_num_experts |
1 | 0 | 0 |
attr |
Step3p5Config.num_experts_per_tok |
1 | 0 | 0 |
attr |
Step3p5Config.moe_top_k |
1 | 0 | 0 |
attr |
Step3p5Config.moe_layer_offset |
1 | 0 | 0 |
attr |
Step3p5Config.rope_theta |
1 | 0 | 0 |
attr |
Step3p5Config.rope_scaling |
1 | 0 | 0 |
attr |
Step3p5Config.head_dim |
1 | 0 | 0 |
attr |
Step3p5Config.norm_expert_weight |
1 | 0 | 0 |
attr |
Step3p5Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Step3p5Config.moe_router_activation |
1 | 0 | 0 |
attr |
Step3p5Config.moe_router_scaling_factor |
1 | 0 | 0 |
attr |
Step3p5Config.use_moe_router_bias |
1 | 0 | 0 |
attr |
Step3p5Config.need_fp32_gate |
1 | 0 | 0 |
attr |
Step3p5Config.att_impl_type |
1 | 0 | 0 |
attr |
Step3p5Config.use_head_wise_attn_gate |
1 | 0 | 0 |
attr |
Step3p5Config.layer_types |
1 | 0 | 0 |
attr |
Step3p5Config.use_rope_layers |
1 | 0 | 0 |
attr |
Step3p5Config.yarn_only_types |
1 | 0 | 0 |
attr |
Step3p5Config.attention_other_setting |
1 | 0 | 0 |
attr |
Step3p5Config.num_nextn_predict_layers |
1 | 0 | 0 |
attr |
Step3p5Config.swiglu_limits |
1 | 0 | 0 |
attr |
Step3p5Config.swiglu_limits_shared |
1 | 0 | 0 |
attr |
Step3p5Config.bos_token_id |
1 | 0 | 0 |
attr |
Step3p5Config.eos_token_id |
1 | 0 | 0 |
attr |
Step3p5Config.share_expert_dim |
1 | 0 | 0 |
vllm.transformers_utils.configs.ultravox (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UltravoxConfig.__init__ |
14 | 12 | 0 |
meth |
UltravoxConfig.__setattr__ |
3 | 0 | 0 |
attr |
UltravoxConfig.ignore_index |
1 | 0 | 0 |
attr |
UltravoxConfig.audio_token_index |
1 | 0 | 0 |
attr |
UltravoxConfig.hidden_size |
1 | 0 | 0 |
attr |
UltravoxConfig.stack_factor |
1 | 0 | 0 |
attr |
UltravoxConfig.norm_init |
1 | 0 | 0 |
attr |
UltravoxConfig.projector_act |
1 | 0 | 0 |
attr |
UltravoxConfig.projector_ln_mid |
1 | 0 | 0 |
attr |
UltravoxConfig.num_projector_layers |
1 | 0 | 0 |
attr |
UltravoxConfig.text_model_id |
1 | 0 | 0 |
attr |
UltravoxConfig.audio_model_id |
1 | 0 | 0 |
attr |
UltravoxConfig.audio_config |
1 | 0 | 0 |
vllm.transformers_utils.dynamic_module (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
try_get_class_from_dynamic_module |
15 | 14 | 0 |
vllm.transformers_utils.gguf_utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.transformers_utils.model_arch_config_convertor (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ModelArchConfigConvertorBase.__init__ |
3 | 2 | 0 |
meth |
ModelArchConfigConvertorBase.get_torch_dtype |
5 | 4 | 0 |
meth |
ModelArchConfigConvertorBase._normalize_quantization_config |
2 | 1 | 0 |
meth |
ModelArchConfigConvertorBase.get_quantization_config |
1 | 0 | 0 |
attr |
ModelArchConfigConvertorBase.hf_config |
1 | 0 | 0 |
attr |
ModelArchConfigConvertorBase.hf_text_config |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.transformers_utils.processor (11 missing, 14 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
cached_get_feature_extractor |
1 | 0 | 0 |
attr |
cached_get_image_processor |
1 | 0 | 0 |
func |
cached_image_processor_from_config |
3 | 2 | 1 |
func |
get_image_processor |
6 | 5 | 2 |
func |
get_feature_extractor |
6 | 5 | 2 |
func |
cached_processor_from_config |
4 | 4 | 1 |
func |
cached_video_processor_from_config |
4 | 3 | 1 |
func |
cached_get_processor_without_dynamic_kwargs |
7 | 7 | 2 |
func |
get_video_processor |
7 | 6 | 2 |
attr |
cached_get_video_processor |
1 | 0 | 0 |
func |
cached_feature_extractor_from_config |
3 | 2 | 1 |
attr |
logger |
1 | 0 | 0 |
func |
get_processor |
7 | 7 | 2 |
attr |
cached_get_processor |
1 | 0 | 0 |
vllm.transformers_utils.processors.bagel (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BagelProcessor.call |
4 | 3 | 0 |
meth |
BagelProcessor.batch_decode |
3 | 0 | 0 |
meth |
BagelProcessor.decode |
3 | 0 | 0 |
prop |
BagelProcessor.model_input_names |
1 | 0 | 0 |
vllm.transformers_utils.processors.deepseek_ocr (53 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
find_closest_aspect_ratio |
6 | 0 | 0 |
meth |
ImageTransform.init |
4 | 3 | 0 |
meth |
ImageTransform.call |
2 | 1 | 0 |
attr |
ImageTransform.mean |
1 | 0 | 0 |
attr |
ImageTransform.std |
1 | 0 | 0 |
attr |
ImageTransform.normalize |
1 | 0 | 0 |
attr |
ImageTransform.transform |
1 | 0 | 0 |
func |
count_tiles |
7 | 0 | 0 |
func |
dynamic_preprocess |
6 | 0 | 0 |
meth |
DeepseekOCRProcessor.init |
17 | 15 | 0 |
meth |
DeepseekOCRProcessor.encode |
4 | 3 | 0 |
meth |
DeepseekOCRProcessor.decode |
3 | 2 | 0 |
meth |
DeepseekOCRProcessor.process_one |
4 | 3 | 0 |
meth |
DeepseekOCRProcessor.call |
5 | 3 | 0 |
meth |
DeepseekOCRProcessor.tokenize_with_images |
6 | 5 | 0 |
prop |
DeepseekOCRProcessor.bos_id |
1 | 0 | 0 |
prop |
DeepseekOCRProcessor.eos_id |
1 | 0 | 0 |
prop |
DeepseekOCRProcessor.pad_id |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.image_size |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.base_size |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.strategy |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.patch_size |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.image_mean |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.image_std |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.normalize |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.downsample_ratio |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.image_transform |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.tokenizer |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.image_token_id |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.image_token |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.pad_token |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.add_special_token |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.sft_format |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.mask_prompt |
1 | 0 | 0 |
attr |
DeepseekOCRProcessor.ignore_id |
1 | 0 | 0 |
vllm.transformers_utils.processors.deepseek_vl2 (34 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekVLV2Processor.init |
15 | 13 | 0 |
meth |
DeepseekVLV2Processor.select_best_resolution |
2 | 0 | 0 |
meth |
DeepseekVLV2Processor.encode |
4 | 3 | 0 |
meth |
DeepseekVLV2Processor.decode |
3 | 2 | 0 |
meth |
DeepseekVLV2Processor.process_one |
5 | 4 | 1 |
meth |
DeepseekVLV2Processor.call |
5 | 4 | 1 |
meth |
DeepseekVLV2Processor.tokenize_with_images |
6 | 5 | 0 |
prop |
DeepseekVLV2Processor.bos_id |
1 | 0 | 0 |
prop |
DeepseekVLV2Processor.eos_id |
1 | 0 | 0 |
prop |
DeepseekVLV2Processor.pad_id |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.candidate_resolutions |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.image_size |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.patch_size |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.image_mean |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.image_std |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.normalize |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.downsample_ratio |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.image_transform |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.tokenizer |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.image_token_id |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.image_token |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.pad_token |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.add_special_token |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.sft_format |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.mask_prompt |
1 | 0 | 0 |
attr |
DeepseekVLV2Processor.ignore_id |
1 | 0 | 0 |
meth |
ImageTransform.init |
4 | 3 | 0 |
meth |
ImageTransform.call |
2 | 1 | 0 |
attr |
ImageTransform.mean |
1 | 0 | 0 |
attr |
ImageTransform.std |
1 | 0 | 0 |
attr |
ImageTransform.normalize |
1 | 0 | 0 |
attr |
ImageTransform.transform |
1 | 0 | 0 |
vllm.transformers_utils.processors.fireredasr2_processor (65 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FireRedASR2Processor.init |
4 | 0 | 0 |
meth |
FireRedASR2Processor.get_decoder_prompt_ids |
4 | 0 | 0 |
meth |
FireRedASR2Processor.call |
3 | 0 | 0 |
meth |
FireRedASR2Processor.get_prompt_ids |
3 | 1 | 0 |
attr |
FireRedASR2Processor.current_processor |
1 | 0 | 0 |
attr |
FireRedASR2Processor.audio_token |
1 | 0 | 0 |
attr |
FireRedASR2Processor.audio_token_id |
1 | 0 | 0 |
meth |
CMVN.init |
4 | 0 | 0 |
meth |
CMVN.call |
2 | 0 | 0 |
meth |
FireRedASR2FeatureExtractor.init |
18 | 0 | 0 |
meth |
FireRedASR2FeatureExtractor.call |
11 | 10 | 0 |
attr |
FireRedASR2FeatureExtractor.chunk_length |
1 | 0 | 0 |
attr |
FireRedASR2FeatureExtractor.max_length |
1 | 0 | 0 |
attr |
FireRedASR2FeatureExtractor.dim |
1 | 0 | 0 |
attr |
FireRedASR2FeatureExtractor.means |
1 | 0 | 0 |
attr |
FireRedASR2FeatureExtractor.inverse_std_variences |
1 | 0 | 0 |
attr |
FireRedASR2FeatureExtractor.num_mel_bins |
1 | 0 | 0 |
attr |
FireRedASR2FeatureExtractor.frame_length |
1 | 0 | 0 |
attr |
FireRedASR2FeatureExtractor.frame_shift |
1 | 0 | 0 |
attr |
FireRedASR2FeatureExtractor.dither |
1 | 0 | 0 |
attr |
FireRedASR2FeatureExtractor.sampling_rate |
1 | 0 | 0 |
attr |
FireRedASR2FeatureExtractor.downsample_rate |
1 | 0 | 0 |
attr |
FireRedASR2FeatureExtractor.context |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
KaldifeatFbank.init |
5 | 0 | 0 |
meth |
KaldifeatFbank.call |
4 | 0 | 0 |
attr |
KaldifeatFbank.dither |
1 | 0 | 0 |
attr |
KaldifeatFbank.opts |
1 | 0 | 0 |
vllm.transformers_utils.processors.funasr_processor (68 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FunASRProcessor.init |
4 | 0 | 0 |
meth |
FunASRProcessor.get_decoder_prompt_ids |
4 | 0 | 0 |
meth |
FunASRProcessor.call |
3 | 0 | 0 |
meth |
FunASRProcessor.get_prompt_ids |
3 | 1 | 0 |
attr |
FunASRProcessor.current_processor |
1 | 0 | 0 |
attr |
FunASRProcessor.audio_token |
1 | 0 | 0 |
attr |
FunASRProcessor.audio_token_id |
1 | 0 | 0 |
func |
apply_lfr |
4 | 0 | 0 |
func |
apply_cmvn |
3 | 0 | 0 |
meth |
FunASRFeatureExtractor.init |
10 | 0 | 0 |
meth |
FunASRFeatureExtractor.extract_fbank |
6 | 1 | 0 |
meth |
FunASRFeatureExtractor.call |
13 | 12 | 0 |
attr |
FunASRFeatureExtractor.frontend_conf |
1 | 0 | 0 |
attr |
FunASRFeatureExtractor.n_fft |
1 | 0 | 0 |
attr |
FunASRFeatureExtractor.hop_length |
1 | 0 | 0 |
attr |
FunASRFeatureExtractor.chunk_length |
1 | 0 | 0 |
attr |
FunASRFeatureExtractor.n_samples |
1 | 0 | 0 |
attr |
FunASRFeatureExtractor.nb_max_frames |
1 | 0 | 0 |
attr |
FunASRFeatureExtractor.sampling_rate |
1 | 0 | 0 |
attr |
FunASRFeatureExtractor.dither |
1 | 0 | 0 |
func |
load_cmvn |
2 | 0 | 0 |
meth |
WavFrontend.init |
15 | 13 | 0 |
meth |
WavFrontend.forward |
4 | 2 | 0 |
attr |
WavFrontend.fs |
1 | 0 | 0 |
attr |
WavFrontend.window |
1 | 0 | 0 |
attr |
WavFrontend.n_mels |
1 | 0 | 0 |
attr |
WavFrontend.frame_length |
1 | 0 | 0 |
attr |
WavFrontend.frame_shift |
1 | 0 | 0 |
attr |
WavFrontend.filter_length_min |
1 | 0 | 0 |
attr |
WavFrontend.filter_length_max |
1 | 0 | 0 |
attr |
WavFrontend.lfr_m |
1 | 0 | 0 |
attr |
WavFrontend.lfr_n |
1 | 0 | 0 |
attr |
WavFrontend.cmvn_file |
1 | 0 | 0 |
attr |
WavFrontend.dither |
1 | 0 | 0 |
attr |
WavFrontend.snip_edges |
1 | 0 | 0 |
attr |
WavFrontend.upsacle_samples |
1 | 0 | 0 |
attr |
WavFrontend.cmvn |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.transformers_utils.processors.hunyuan_vl (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HunYuanVLProcessor.init |
5 | 0 | 0 |
meth |
HunYuanVLProcessor.call |
5 | 4 | 0 |
meth |
HunYuanVLProcessor.batch_decode |
3 | 0 | 0 |
meth |
HunYuanVLProcessor.decode |
3 | 0 | 0 |
meth |
HunYuanVLProcessor.post_process_image_text_to_text |
5 | 0 | 0 |
meth |
HunYuanVLProcessor.apply_chat_template |
3 | 0 | 0 |
meth |
HunYuanVLProcessor.get_imgs_pos |
2 | 0 | 0 |
prop |
HunYuanVLProcessor.model_input_names |
1 | 0 | 0 |
attr |
HunYuanVLProcessor.tokenizer |
1 | 0 | 0 |
attr |
HunYuanVLProcessor.image_token_id |
1 | 0 | 0 |
attr |
HunYuanVLProcessor.image_token |
1 | 0 | 0 |
attr |
HunYuanVLProcessor.im_start_token_id |
1 | 0 | 0 |
attr |
HunYuanVLProcessor.im_start_token |
1 | 0 | 0 |
attr |
HunYuanVLProcessor.im_end_token_id |
1 | 0 | 0 |
attr |
HunYuanVLProcessor.im_end_token |
1 | 0 | 0 |
attr |
HunYuanVLProcessor.placeholder_token |
1 | 0 | 0 |
attr |
HunYuanVLProcessor.pad_id |
1 | 0 | 0 |
vllm.transformers_utils.processors.hunyuan_vl_image (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HunYuanVLImageProcessor.init |
16 | 15 | 0 |
meth |
HunYuanVLImageProcessor._preprocess |
16 | 15 | 0 |
meth |
HunYuanVLImageProcessor.preprocess |
20 | 19 | 0 |
meth |
HunYuanVLImageProcessor.get_number_of_image_patches |
4 | 2 | 0 |
attr |
HunYuanVLImageProcessor.min_pixels |
1 | 0 | 0 |
attr |
HunYuanVLImageProcessor.max_pixels |
1 | 0 | 0 |
attr |
HunYuanVLImageProcessor.size |
1 | 0 | 0 |
attr |
HunYuanVLImageProcessor.do_resize |
1 | 0 | 0 |
attr |
HunYuanVLImageProcessor.resample |
1 | 0 | 0 |
attr |
HunYuanVLImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
HunYuanVLImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
HunYuanVLImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
HunYuanVLImageProcessor.image_mean |
1 | 0 | 0 |
attr |
HunYuanVLImageProcessor.image_std |
1 | 0 | 0 |
attr |
HunYuanVLImageProcessor.patch_size |
1 | 0 | 0 |
attr |
HunYuanVLImageProcessor.temporal_patch_size |
1 | 0 | 0 |
attr |
HunYuanVLImageProcessor.merge_size |
1 | 0 | 0 |
attr |
HunYuanVLImageProcessor.do_convert_rgb |
1 | 0 | 0 |
func |
smart_resize |
6 | 5 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.transformers_utils.processors.ovis (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OvisProcessor.init |
7 | 0 | 0 |
meth |
OvisProcessor.get_image_size |
1 | 0 | 0 |
meth |
OvisProcessor.get_token_value |
2 | 0 | 0 |
meth |
OvisProcessor.construct_image_indicators |
2 | 0 | 0 |
meth |
OvisProcessor.construct_image_placeholders |
2 | 0 | 0 |
meth |
OvisProcessor.preprocess_image |
6 | 1 | 0 |
meth |
OvisProcessor.batch_decode |
3 | 0 | 0 |
meth |
OvisProcessor.decode |
3 | 0 | 0 |
meth |
OvisProcessor.post_process_image_text_to_text |
2 | 0 | 0 |
prop |
OvisProcessor.extra_special_tokens |
1 | 0 | 0 |
prop |
OvisProcessor.model_input_names |
1 | 0 | 0 |
attr |
OvisProcessor.image_token |
1 | 0 | 0 |
attr |
OvisProcessor.image_pad_token |
1 | 0 | 0 |
attr |
OvisProcessor.image_segment_len |
1 | 0 | 0 |
vllm.transformers_utils.processors.ovis2_5 (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ovis2_5Processor.init |
9 | 0 | 0 |
meth |
Ovis2_5Processor.smart_resize |
6 | 5 | 0 |
meth |
Ovis2_5Processor.get_token_value |
2 | 0 | 0 |
meth |
Ovis2_5Processor.construct_visual_indicators |
3 | 1 | 0 |
meth |
Ovis2_5Processor.construct_visual_placeholders |
3 | 1 | 0 |
meth |
Ovis2_5Processor.preprocess_multidata |
7 | 6 | 0 |
prop |
Ovis2_5Processor.extra_special_tokens |
1 | 0 | 0 |
attr |
Ovis2_5Processor.image_token |
1 | 0 | 0 |
attr |
Ovis2_5Processor.video_token |
1 | 0 | 0 |
attr |
Ovis2_5Processor.image_pad_token |
1 | 0 | 0 |
attr |
Ovis2_5Processor.patch_size |
1 | 0 | 0 |
attr |
Ovis2_5Processor.hidden_stride |
1 | 0 | 0 |
attr |
Ovis2_5Processor.temporal_patch_size |
1 | 0 | 0 |
vllm.transformers_utils.processors.qwen3_asr (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3ASRProcessor.init |
4 | 0 | 0 |
meth |
Qwen3ASRProcessor.call |
4 | 3 | 0 |
meth |
Qwen3ASRProcessor.replace_multimodal_special_tokens |
3 | 0 | 0 |
meth |
Qwen3ASRProcessor.apply_chat_template |
4 | 0 | 0 |
prop |
Qwen3ASRProcessor.model_input_names |
1 | 0 | 0 |
attr |
Qwen3ASRProcessor.audio_token |
1 | 0 | 0 |
attr |
Qwen3ASRProcessor.audio_bos_token |
1 | 0 | 0 |
attr |
Qwen3ASRProcessor.audio_eos_token |
1 | 0 | 0 |
vllm.transformers_utils.repo_utils (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
any_pattern_in_repo_files |
6 | 5 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
get_model_path |
3 | 2 | 0 |
func |
get_hf_file_to_dict |
4 | 3 | 0 |
vllm.transformers_utils.runai_utils (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
runai_pull_files |
1 | 0 | 0 |
attr |
runai_model_streamer |
1 | 0 | 0 |
attr |
runai_list_safetensors |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
ObjectStorageModel._close_by_signal |
2 | 0 | 0 |
attr |
ObjectStorageModel.dir |
1 | 0 | 0 |
vllm.transformers_utils.s3_utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
boto3 |
1 | 0 | 0 |
vllm.transformers_utils.utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.triton_utils (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
tldevice |
1 | 0 | 0 |
attr |
tl |
1 | 0 | 0 |
attr |
triton |
1 | 0 | 0 |
vllm.triton_utils.allocation (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
set_triton_allocator |
2 | 1 | 0 |
vllm.triton_utils.importing (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
HAS_TRITON |
1 | 0 | 0 |
attr |
active_drivers |
1 | 0 | 0 |
meth |
TritonPlaceholder.init |
1 | 0 | 0 |
meth |
TritonPlaceholder._dummy_decorator |
2 | 0 | 0 |
attr |
TritonPlaceholder.jit |
1 | 0 | 0 |
attr |
TritonPlaceholder.autotune |
1 | 0 | 0 |
attr |
TritonPlaceholder.heuristics |
1 | 0 | 0 |
attr |
TritonPlaceholder.Config |
1 | 0 | 0 |
attr |
TritonPlaceholder.language |
1 | 0 | 0 |
meth |
TritonLanguagePlaceholder.init |
1 | 0 | 0 |
attr |
TritonLanguagePlaceholder.constexpr |
1 | 0 | 0 |
attr |
TritonLanguagePlaceholder.dtype |
1 | 0 | 0 |
attr |
TritonLanguagePlaceholder.int64 |
1 | 0 | 0 |
attr |
TritonLanguagePlaceholder.int32 |
1 | 0 | 0 |
attr |
TritonLanguagePlaceholder.tensor |
1 | 0 | 0 |
attr |
TritonLanguagePlaceholder.exp |
1 | 0 | 0 |
attr |
TritonLanguagePlaceholder.log |
1 | 0 | 0 |
attr |
TritonLanguagePlaceholder.log2 |
1 | 0 | 0 |
attr |
cuda_visible_devices |
1 | 0 | 0 |
attr |
is_distributed_env |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.usage.usage_lib (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UsageMessage._report_continuous_usage |
1 | 0 | 0 |
attr |
UsageMessage.uuid |
1 | 0 | 0 |
func |
is_usage_stats_enabled |
1 | 0 | 0 |
attr |
usage_message |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.utils.argparse_utils (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SortedHelpFormatter._split_lines |
3 | 0 | 0 |
meth |
SortedHelpFormatter.add_arguments |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
FlexibleArgumentParser.init |
3 | 0 | 0 |
meth |
FlexibleArgumentParser.format_help |
1 | 0 | 0 |
meth |
FlexibleArgumentParser.parse_args |
3 | 2 | 0 |
meth |
FlexibleArgumentParser.check_port |
2 | 0 | 0 |
attr |
FlexibleArgumentParser.add_json_tip |
1 | 0 | 0 |
vllm.utils.async_utils (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AsyncMicrobatchTokenizer.init |
4 | 3 | 0 |
meth |
AsyncMicrobatchTokenizer.call |
3 | 1 | 0 |
meth |
AsyncMicrobatchTokenizer.encode |
3 | 1 | 0 |
meth |
AsyncMicrobatchTokenizer.decode |
3 | 1 | 0 |
meth |
AsyncMicrobatchTokenizer._batch_encode_loop |
3 | 2 | 0 |
meth |
AsyncMicrobatchTokenizer._batch_decode_loop |
2 | 1 | 0 |
meth |
AsyncMicrobatchTokenizer.del |
1 | 0 | 0 |
attr |
AsyncMicrobatchTokenizer.tokenizer |
1 | 0 | 0 |
attr |
AsyncMicrobatchTokenizer.max_batch_size |
1 | 0 | 0 |
attr |
AsyncMicrobatchTokenizer.batch_wait_timeout_s |
1 | 0 | 0 |
func |
cancel_task_threadsafe |
2 | 1 | 0 |
func |
run_in_loop |
4 | 2 | 0 |
func |
anext |
2 | 1 | 0 |
vllm.utils.cache (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LRUCache.init |
3 | 2 | 0 |
meth |
LRUCache.popitem |
2 | 1 | 0 |
attr |
LRUCache.pinned_items |
1 | 0 | 0 |
attr |
ALL_PINNED_SENTINEL |
1 | 0 | 0 |
meth |
CacheInfo.sub |
2 | 1 | 0 |
vllm.utils.collection_utils (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LazyDict.init |
2 | 1 | 0 |
meth |
LazyDict.setitem |
3 | 2 | 0 |
meth |
LazyDict.iter |
1 | 0 | 0 |
meth |
LazyDict.len |
1 | 0 | 0 |
func |
full_groupby |
3 | 2 | 0 |
vllm.utils.counter (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Counter.counter |
1 | 0 | 0 |
vllm.utils.deep_gemm (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
m_grouped_fp8_gemm_nt_contiguous |
3 | 0 | 0 |
func |
fp8_gemm_nt |
3 | 0 | 0 |
func |
calc_diff |
3 | 2 | 0 |
func |
should_use_deepgemm_for_fp8_linear |
4 | 3 | 0 |
func |
fp8_m_grouped_gemm_nt_masked |
3 | 0 | 0 |
vllm.utils.flashinfer (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
flashinfer_fp4_quantize |
1 | 0 | 0 |
attr |
flashinfer_cutlass_fused_moe |
1 | 0 | 0 |
attr |
scaled_fp4_grouped_quantize |
1 | 0 | 0 |
attr |
silu_and_mul_scaled_nvfp4_experts_quantize |
1 | 0 | 0 |
attr |
flashinfer_cutedsl_grouped_gemm_nt_masked |
1 | 0 | 0 |
func |
should_use_flashinfer_for_blockscale_fp8_gemm |
5 | 4 | 0 |
attr |
flashinfer_fp8_blockscale_gemm |
1 | 0 | 0 |
attr |
trtllm_fp4_block_scale_moe |
1 | 0 | 0 |
attr |
flashinfer_trtllm_fp8_block_scale_moe |
1 | 0 | 0 |
attr |
nvfp4_block_scale_interleave |
1 | 0 | 0 |
attr |
autotune |
1 | 0 | 0 |
vllm.utils.func_utils (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
identity |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.utils.gc_utils (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
GCDebugger.config |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.utils.hashing (0 missing, 4 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
xxhash |
2 | 2 | 1 |
func |
xxhash_cbor |
2 | 2 | 1 |
func |
sha256_cbor |
2 | 2 | 1 |
func |
sha256 |
2 | 2 | 1 |
vllm.utils.import_utils (7 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
PlaceholderModule.placeholder_attr |
2 | 1 | 0 |
func |
import_from_path |
3 | 2 | 0 |
func |
import_pynvml |
1 | 0 | 0 |
func |
import_triton_kernels |
1 | 0 | 0 |
meth |
LazyLoader.init |
4 | 3 | 0 |
meth |
LazyLoader.getattr |
2 | 2 | 2 |
func |
resolve_obj_by_qualname |
2 | 2 | 1 |
func |
get_vllm_optional_dependencies |
1 | 0 | 0 |
vllm.utils.mem_utils (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeviceMemoryProfiler.init |
2 | 1 | 0 |
meth |
DeviceMemoryProfiler.enter |
1 | 0 | 0 |
meth |
DeviceMemoryProfiler.exit |
4 | 0 | 0 |
attr |
DeviceMemoryProfiler.device |
1 | 0 | 0 |
vllm.utils.mistral (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
mt |
1 | 0 | 0 |
vllm.utils.nccl (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.utils.network_utils (2 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
make_zmq_socket |
7 | 7 | 1 |
func |
zmq_socket_ctx |
6 | 6 | 1 |
func |
close_sockets |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.utils.nvtx_pytorch_hooks (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
process_layer_params |
2 | 0 | 0 |
func |
layerwise_nvtx_marker_context |
5 | 0 | 0 |
meth |
PytHooks.init |
1 | 0 | 0 |
meth |
PytHooks._process_layer_params |
2 | 0 | 0 |
meth |
PytHooks.module_fwd_hook |
4 | 0 | 0 |
meth |
PytHooks.module_fwd_pre_hook |
4 | 0 | 0 |
meth |
PytHooks.register_hooks |
3 | 0 | 0 |
attr |
PytHooks.module_to_name_map |
1 | 0 | 0 |
func |
print_tensor |
4 | 0 | 0 |
func |
construct_marker_dict_and_push |
6 | 0 | 0 |
vllm.utils.platform_utils (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
cuda_get_device_properties |
4 | 2 | 0 |
vllm.utils.print_utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
print_embeddings |
2 | 1 | 0 |
vllm.utils.profiling (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
cprofile_context |
2 | 1 | 0 |
func |
cprofile |
3 | 2 | 0 |
vllm.utils.registry (3 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ExtensionManager.register |
2 | 1 | 0 |
meth |
ExtensionManager.load |
4 | 2 | 1 |
vllm.utils.system_utils (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
update_environment_variables |
2 | 1 | 0 |
func |
suppress_stdout |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
get_mp_context |
1 | 0 | 0 |
func |
kill_process_tree |
2 | 1 | 0 |
func |
set_ulimit |
2 | 1 | 0 |
vllm.utils.tensor_schema (3 missing, 4 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TensorSchema.init |
4 | 4 | 1 |
meth |
TensorSchema.getitem |
2 | 2 | 1 |
meth |
TensorSchema.get |
3 | 3 | 2 |
attr |
TensorShape.dims |
1 | 0 | 0 |
attr |
TensorShape.dynamic_dims |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.utils.torch_utils (15 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
HAS_OPAQUE_TYPE |
1 | 0 | 0 |
func |
direct_register_custom_op |
8 | 7 | 0 |
func |
weak_ref_tensor |
2 | 2 | 2 |
attr |
logger |
1 | 0 | 0 |
func |
set_default_torch_num_threads |
2 | 1 | 0 |
func |
common_broadcastable_dtype |
2 | 1 | 0 |
attr |
vllm_lib |
1 | 0 | 0 |
func |
is_lossless_cast |
3 | 2 | 0 |
func |
set_default_torch_dtype |
2 | 1 | 0 |
meth |
ModuleName.init |
2 | 1 | 0 |
meth |
ModuleName.eq |
2 | 0 | 0 |
meth |
ModuleName.hash |
1 | 0 | 0 |
meth |
ModuleName.fx_repr |
1 | 0 | 0 |
attr |
ModuleName.value |
1 | 0 | 0 |
func |
guard_cuda_initialization |
1 | 0 | 0 |
vllm.utils.tqdm_utils (0 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
maybe_tqdm |
4 | 4 | 1 |
vllm.v1.attention.backend (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AttentionImpl.fused_output_quant_supported |
2 | 1 | 0 |
meth |
AttentionImpl.fused_rope_kvcache_supported |
1 | 0 | 0 |
meth |
AttentionImpl.do_rope_and_kv_cache_update |
10 | 9 | 0 |
meth |
CommonAttentionMetadata.replace |
2 | 1 | 0 |
meth |
AttentionBackend.get_builder_cls |
1 | 0 | 0 |
meth |
AttentionMetadataBuilder.init |
5 | 4 | 0 |
attr |
AttentionMetadataBuilder.kv_cache_spec |
1 | 0 | 0 |
attr |
AttentionMetadataBuilder.layer_names |
1 | 0 | 0 |
attr |
AttentionMetadataBuilder.vllm_config |
1 | 0 | 0 |
attr |
AttentionMetadataBuilder.device |
1 | 0 | 0 |
meth |
AttentionImplBase.new |
3 | 0 | 0 |
meth |
AttentionImplBase.process_weights_after_loading |
2 | 1 | 0 |
meth |
MultipleOf.init |
2 | 1 | 0 |
vllm.v1.attention.backends.cpu_attn (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CPUAttentionBackend.use_cascade_attention |
3 | 1 | 0 |
attr |
CPUAttentionMetadataBuilder.use_sdpa_prefill |
1 | 0 | 0 |
attr |
CPUAttentionMetadataBuilder.kv_cache_spec |
1 | 0 | 0 |
attr |
CPUAttentionMetadataBuilder.vllm_config |
1 | 0 | 0 |
attr |
CPUAttentionMetadataBuilder.num_kv_heads |
1 | 0 | 0 |
attr |
CPUAttentionMetadataBuilder.num_heads |
1 | 0 | 0 |
attr |
CPUAttentionMetadataBuilder.head_dim |
1 | 0 | 0 |
attr |
CPUAttentionMetadataBuilder.dtype |
1 | 0 | 0 |
attr |
CPUAttentionMetadataBuilder.window_size |
1 | 0 | 0 |
attr |
CPUAttentionMetadataBuilder.block_size |
1 | 0 | 0 |
attr |
CPUAttentionMetadataBuilder.isa |
1 | 0 | 0 |
attr |
CPUAttentionMetadataBuilder.is_cross_attention |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
CPUAttentionBackendImpl.kv_sharing_target_layer_name |
1 | 0 | 0 |
attr |
CPUAttentionBackendImpl.num_heads |
1 | 0 | 0 |
attr |
CPUAttentionBackendImpl.head_size |
1 | 0 | 0 |
attr |
CPUAttentionBackendImpl.scale |
1 | 0 | 0 |
attr |
CPUAttentionBackendImpl.logits_soft_cap |
1 | 0 | 0 |
attr |
CPUAttentionBackendImpl.num_kv_heads |
1 | 0 | 0 |
attr |
CPUAttentionBackendImpl.alibi_slopes |
1 | 0 | 0 |
attr |
CPUAttentionBackendImpl.kv_cache_dtype |
1 | 0 | 0 |
attr |
CPUAttentionBackendImpl.num_queries_per_kv |
1 | 0 | 0 |
attr |
CPUAttentionBackendImpl.attn_type |
1 | 0 | 0 |
attr |
CPUAttentionBackendImpl.sinks |
1 | 0 | 0 |
attr |
CPUAttentionBackendImpl.sliding_window |
1 | 0 | 0 |
vllm.v1.attention.backends.fa_utils (2 missing, 5 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
flash_attn_varlen_func |
3 | 3 | 3 |
func |
get_scheduler_metadata |
3 | 3 | 2 |
func |
flash_attn_supports_mla |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.attention.backends.flash_attn (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
attr |
FlashAttentionImpl.num_heads |
1 | 0 | 0 |
attr |
FlashAttentionImpl.head_size |
1 | 0 | 0 |
attr |
FlashAttentionImpl.scale |
1 | 0 | 0 |
attr |
FlashAttentionImpl.num_kv_heads |
1 | 0 | 0 |
attr |
FlashAttentionImpl.alibi_slopes |
1 | 0 | 0 |
attr |
FlashAttentionImpl.kv_cache_dtype |
1 | 0 | 0 |
attr |
FlashAttentionImpl.logits_soft_cap |
1 | 0 | 0 |
attr |
FlashAttentionImpl.kv_sharing_target_layer_name |
1 | 0 | 0 |
attr |
FlashAttentionImpl.num_queries_per_kv |
1 | 0 | 0 |
attr |
FlashAttentionImpl.attn_type |
1 | 0 | 0 |
attr |
FlashAttentionImpl.vllm_flash_attn_version |
1 | 0 | 0 |
attr |
FlashAttentionImpl.batch_invariant_enabled |
1 | 0 | 0 |
attr |
FlashAttentionImpl.sinks |
1 | 0 | 0 |
attr |
FlashAttentionImpl.supports_quant_query_input |
1 | 0 | 0 |
attr |
FlashAttentionImpl.sliding_window |
1 | 0 | 0 |
meth |
FlashAttentionMetadataBuilder.init |
5 | 4 | 0 |
meth |
FlashAttentionMetadataBuilder.use_cascade_attention |
3 | 1 | 0 |
attr |
FlashAttentionMetadataBuilder._cudagraph_support |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.model_config |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.parallel_config |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.cache_config |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.compilation_config |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.attention_config |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.num_heads_q |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.num_heads_kv |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.kv_cache_dtype |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.headdim |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.block_size |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.max_num_splits |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.aot_schedule |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.cp_kv_cache_interleave_size |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.use_full_cuda_graph |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.max_cudagraph_size |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.dcp_world_size |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.dcp_rank |
1 | 0 | 0 |
attr |
FlashAttentionMetadataBuilder.scheduler_metadata |
1 | 0 | 0 |
vllm.v1.attention.backends.flash_attn_diffkv (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.v1.attention.backends.flashinfer (58 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BatchDCPPrefillWrapper.init |
2 | 1 | 0 |
meth |
BatchDCPPrefillWrapper.plan |
17 | 16 | 0 |
meth |
BatchDCPPrefillWrapper.run |
7 | 6 | 0 |
func |
fast_plan_decode |
22 | 21 | 0 |
attr |
FP8_DTYPE |
1 | 0 | 0 |
meth |
FlashInferMetadataBuilder.init |
5 | 4 | 0 |
meth |
FlashInferMetadataBuilder._get_workspace_buffer |
1 | 0 | 0 |
meth |
FlashInferMetadataBuilder.set_workspace_buffer |
2 | 1 | 0 |
meth |
FlashInferMetadataBuilder._get_decode_wrapper |
3 | 2 | 0 |
meth |
FlashInferMetadataBuilder._get_cascade_wrapper |
1 | 0 | 0 |
meth |
FlashInferMetadataBuilder.use_cascade_attention |
3 | 1 | 0 |
attr |
FlashInferMetadataBuilder.cache_config |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.model_config |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.attention_config |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.compilation_config |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.enable_cuda_graph |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.use_dcp |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.num_qo_heads |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.num_kv_heads |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.head_dim |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.page_size |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.cache_dtype |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.use_trtllm_decode_attention |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.global_hyperparameters |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.sm_scale |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.window_left |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.logits_soft_cap |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.has_sinks |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.pin_memory |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.paged_kv_indptr |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.paged_kv_indptr_cpu_buffer |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.paged_kv_indices |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.paged_kv_last_page_len |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.decode_fixed_split_size |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.prefill_fixed_split_size |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.disable_split_kv |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.dcp_world_size |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.dcp_rank |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.dcp_kv_cache_interleave_size |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.kv_cache_dtype |
1 | 0 | 0 |
attr |
FlashInferMetadataBuilder.q_data_type |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
FlashInferImpl.fused_output_quant_supported |
2 | 1 | 0 |
meth |
FlashInferImpl.process_weights_after_loading |
2 | 1 | 0 |
attr |
FlashInferImpl.num_heads |
1 | 0 | 0 |
attr |
FlashInferImpl.head_size |
1 | 0 | 0 |
attr |
FlashInferImpl.scale |
1 | 0 | 0 |
attr |
FlashInferImpl.num_kv_heads |
1 | 0 | 0 |
attr |
FlashInferImpl.alibi_slopes |
1 | 0 | 0 |
attr |
FlashInferImpl.window_left |
1 | 0 | 0 |
attr |
FlashInferImpl.kv_cache_dtype |
1 | 0 | 0 |
attr |
FlashInferImpl.logits_soft_cap |
1 | 0 | 0 |
attr |
FlashInferImpl.kv_sharing_target_layer_name |
1 | 0 | 0 |
attr |
FlashInferImpl.num_queries_per_kv |
1 | 0 | 0 |
attr |
FlashInferImpl.support_trtllm_attn |
1 | 0 | 0 |
attr |
FlashInferImpl.supports_quant_query_input |
1 | 0 | 0 |
attr |
FlashInferImpl.sliding_window |
1 | 0 | 0 |
vllm.v1.attention.backends.flex_attention (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlexAttentionBackend.use_cascade_attention |
3 | 1 | 0 |
attr |
flex_attention_compiled |
1 | 0 | 0 |
meth |
FlexAttentionMetadataBuilder.init |
5 | 4 | 0 |
meth |
FlexAttentionMetadataBuilder.use_cascade_attention |
3 | 1 | 0 |
attr |
FlexAttentionMetadataBuilder.model_config |
1 | 0 | 0 |
attr |
FlexAttentionMetadataBuilder.parallel_config |
1 | 0 | 0 |
attr |
FlexAttentionMetadataBuilder.cache_config |
1 | 0 | 0 |
attr |
FlexAttentionMetadataBuilder.num_heads_q |
1 | 0 | 0 |
attr |
FlexAttentionMetadataBuilder.num_heads_kv |
1 | 0 | 0 |
attr |
FlexAttentionMetadataBuilder.headdim |
1 | 0 | 0 |
attr |
FlexAttentionMetadataBuilder.block_size |
1 | 0 | 0 |
attr |
FlexAttentionMetadataBuilder.kv_cache_spec |
1 | 0 | 0 |
func |
causal_mask_mod |
5 | 4 | 0 |
func |
get_kernel_options |
5 | 2 | 0 |
func |
pad_to_multiple |
4 | 3 | 0 |
meth |
FlexAttentionMetadata.get_mask_mod |
1 | 0 | 0 |
meth |
FlexAttentionMetadata.__post_init__ |
1 | 0 | 0 |
prop |
FlexAttentionMetadata.logical_block_ids |
1 | 0 | 0 |
attr |
create_block_mask_compiled |
1 | 0 | 0 |
meth |
FlexAttentionImpl.__init__ |
12 | 11 | 0 |
attr |
FlexAttentionImpl.num_heads |
1 | 0 | 0 |
attr |
FlexAttentionImpl.head_size |
1 | 0 | 0 |
attr |
FlexAttentionImpl.scale |
1 | 0 | 0 |
attr |
FlexAttentionImpl.num_kv_heads |
1 | 0 | 0 |
attr |
FlexAttentionImpl.attn_type |
1 | 0 | 0 |
attr |
FlexAttentionImpl.kv_cache_dtype |
1 | 0 | 0 |
attr |
FlexAttentionImpl.num_queries_per_kv |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.attention.backends.gdn_attn (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GDNAttentionMetadataBuilder.__init__ |
5 | 4 | 0 |
meth |
GDNAttentionMetadataBuilder.build_for_cudagraph_capture |
2 | 1 | 0 |
attr |
GDNAttentionMetadataBuilder.vllm_config |
1 | 0 | 0 |
attr |
GDNAttentionMetadataBuilder.compilation_config |
1 | 0 | 0 |
attr |
GDNAttentionMetadataBuilder.speculative_config |
1 | 0 | 0 |
attr |
GDNAttentionMetadataBuilder.kv_cache_spec |
1 | 0 | 0 |
attr |
GDNAttentionMetadataBuilder.use_spec_decode |
1 | 0 | 0 |
attr |
GDNAttentionMetadataBuilder.use_full_cuda_graph |
1 | 0 | 0 |
attr |
GDNAttentionMetadataBuilder.decode_cudagraph_max_bs |
1 | 0 | 0 |
attr |
GDNAttentionMetadataBuilder.spec_state_indices_tensor |
1 | 0 | 0 |
attr |
GDNAttentionMetadataBuilder.non_spec_state_indices_tensor |
1 | 0 | 0 |
attr |
GDNAttentionMetadataBuilder.spec_sequence_masks |
1 | 0 | 0 |
attr |
GDNAttentionMetadataBuilder.spec_token_indx |
1 | 0 | 0 |
attr |
GDNAttentionMetadataBuilder.non_spec_token_indx |
1 | 0 | 0 |
attr |
GDNAttentionMetadataBuilder.spec_query_start_loc |
1 | 0 | 0 |
attr |
GDNAttentionMetadataBuilder.non_spec_query_start_loc |
1 | 0 | 0 |
attr |
GDNAttentionMetadataBuilder.num_accepted_tokens |
1 | 0 | 0 |
vllm.v1.attention.backends.linear_attn (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LinearAttentionMetadataBuilder.__init__ |
5 | 4 | 0 |
vllm.v1.attention.backends.mamba1_attn (0 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Mamba1AttentionMetadataBuilder.build |
5 | 5 | 1 |
vllm.v1.attention.backends.mamba2_attn (1 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Mamba2AttentionMetadataBuilder.__init__ |
5 | 4 | 0 |
meth |
Mamba2AttentionMetadataBuilder.build |
5 | 5 | 1 |
vllm.v1.attention.backends.mamba_attn (9 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseMambaAttentionMetadataBuilder.__init__ |
5 | 4 | 0 |
meth |
BaseMambaAttentionMetadataBuilder.build |
6 | 6 | 1 |
attr |
BaseMambaAttentionMetadataBuilder.speculative_config |
1 | 0 | 0 |
attr |
BaseMambaAttentionMetadataBuilder.compilation_config |
1 | 0 | 0 |
attr |
BaseMambaAttentionMetadataBuilder.use_spec_decode |
1 | 0 | 0 |
attr |
BaseMambaAttentionMetadataBuilder.decode_cudagraph_max_bs |
1 | 0 | 0 |
attr |
BaseMambaAttentionMetadataBuilder.state_indices_tensor_d |
1 | 0 | 0 |
attr |
BaseMambaAttentionMetadataBuilder.block_idx_last_scheduled_token |
1 | 0 | 0 |
attr |
BaseMambaAttentionMetadataBuilder.block_idx_last_computed_token |
1 | 0 | 0 |
attr |
BaseMambaAttentionMetadataBuilder.decode_num_accepted_tokens |
1 | 0 | 0 |
vllm.v1.attention.backends.mla.aiter_triton_mla (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AiterTritonMLAImpl.__init__ |
12 | 11 | 0 |
meth |
AiterTritonMLAImpl._flash_attn_varlen_diff_headdims |
7 | 0 | 0 |
attr |
AiterTritonMLAImpl.flash_attn_varlen_func |
1 | 0 | 0 |
vllm.v1.attention.backends.mla.cutlass_mla (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
g_sm100_workspace |
1 | 0 | 0 |
meth |
SM100Workspace.__init__ |
2 | 0 | 0 |
meth |
SM100Workspace.get_buf |
1 | 0 | 0 |
meth |
SM100Workspace.ensure_size |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
CutlassMLAImpl.__init__ |
12 | 11 | 0 |
vllm.v1.attention.backends.mla.flashattn_mla (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlashAttnMLAImpl.__init__ |
12 | 11 | 0 |
meth |
FlashAttnMLAMetadataBuilder.__init__ |
5 | 4 | 0 |
meth |
FlashAttnMLAMetadataBuilder._schedule_decode |
8 | 0 | 0 |
attr |
FlashAttnMLAMetadataBuilder.max_num_splits |
1 | 0 | 0 |
attr |
FlashAttnMLAMetadataBuilder.fa_aot_schedule |
1 | 0 | 0 |
attr |
FlashAttnMLAMetadataBuilder.use_full_cuda_graph |
1 | 0 | 0 |
attr |
FlashAttnMLAMetadataBuilder.max_cudagraph_size |
1 | 0 | 0 |
attr |
FlashAttnMLAMetadataBuilder.scheduler_metadata |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.attention.backends.mla.flashinfer_mla (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlashInferMLAImpl.__init__ |
12 | 11 | 0 |
attr |
g_fi_workspace |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.attention.backends.mla.flashinfer_mla_sparse (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
FlashInferMLASparseImpl.__init__ |
14 | 13 | 0 |
attr |
FlashInferMLASparseImpl.num_heads |
1 | 0 | 0 |
attr |
FlashInferMLASparseImpl.head_size |
1 | 0 | 0 |
attr |
FlashInferMLASparseImpl.scale |
1 | 0 | 0 |
attr |
FlashInferMLASparseImpl.num_kv_heads |
1 | 0 | 0 |
attr |
FlashInferMLASparseImpl.kv_cache_dtype |
1 | 0 | 0 |
attr |
FlashInferMLASparseMetadataBuilder.vllm_config |
1 | 0 | 0 |
attr |
FlashInferMLASparseMetadataBuilder.layer_names |
1 | 0 | 0 |
attr |
FlashInferMLASparseMetadataBuilder.kv_cache_spec |
1 | 0 | 0 |
attr |
FlashInferMLASparseMetadataBuilder.model_config |
1 | 0 | 0 |
attr |
FlashInferMLASparseMetadataBuilder.device |
1 | 0 | 0 |
attr |
FlashInferMLASparseMetadataBuilder.mla_dims |
1 | 0 | 0 |
attr |
FlashInferMLASparseMetadataBuilder.topk_tokens |
1 | 0 | 0 |
attr |
FlashInferMLASparseMetadataBuilder.req_id_per_token_buffer |
1 | 0 | 0 |
vllm.v1.attention.backends.mla.flashmla (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlashMLAImpl.__init__ |
12 | 11 | 0 |
meth |
FlashMLAMetadataBuilder.__init__ |
5 | 4 | 0 |
attr |
FlashMLAMetadataBuilder.num_q_heads |
1 | 0 | 0 |
attr |
FlashMLAMetadataBuilder.cg_buf_tile_scheduler_metadata |
1 | 0 | 0 |
attr |
FlashMLAMetadataBuilder.cg_buf_num_splits |
1 | 0 | 0 |
attr |
FlashMLAMetadataBuilder.is_fp8_kvcache |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.attention.backends.mla.flashmla_sparse (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
FlashMLASparseMetadataBuilder.vllm_config |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.layer_names |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.kv_cache_spec |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.model_config |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.device |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.num_heads |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.mla_dims |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.fp8_decode_padded_heads |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.topk_tokens |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.use_fp8_kv_cache |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.topk_tokens_tensor |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.max_model_len_tensor |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.dummy_block_table |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.tile_scheduler_metadata_buffer |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.num_splits_buffer |
1 | 0 | 0 |
attr |
FlashMLASparseMetadataBuilder.req_id_per_token_buffer |
1 | 0 | 0 |
meth |
FlashMLASparseImpl.__init__ |
14 | 13 | 0 |
attr |
FlashMLASparseImpl.num_heads |
1 | 0 | 0 |
attr |
FlashMLASparseImpl.head_size |
1 | 0 | 0 |
attr |
FlashMLASparseImpl.scale |
1 | 0 | 0 |
attr |
FlashMLASparseImpl.num_kv_heads |
1 | 0 | 0 |
attr |
FlashMLASparseImpl.kv_cache_dtype |
1 | 0 | 0 |
attr |
FlashMLASparseImpl.softmax_scale |
1 | 0 | 0 |
attr |
FlashMLASparseImpl.prefill_padding |
1 | 0 | 0 |
attr |
FlashMLASparseImpl.fp8_decode_padded_heads |
1 | 0 | 0 |
attr |
FlashMLASparseImpl.prefill_workspace_shape |
1 | 0 | 0 |
func |
get_prefill_workspace_size |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.attention.backends.mla.indexer (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
get_max_prefill_buffer_size |
2 | 1 | 0 |
meth |
DeepseekV32IndexerMetadataBuilder.__init__ |
3 | 0 | 0 |
meth |
DeepseekV32IndexerMetadataBuilder.build_one_prefill_chunk |
6 | 0 | 0 |
attr |
DeepseekV32IndexerMetadataBuilder.max_prefill_buffer_size |
1 | 0 | 0 |
attr |
DeepseekV32IndexerMetadataBuilder.num_speculative_tokens |
1 | 0 | 0 |
attr |
DeepseekV32IndexerMetadataBuilder.num_sms |
1 | 0 | 0 |
attr |
DeepseekV32IndexerMetadataBuilder.decode_lens_buffer |
1 | 0 | 0 |
attr |
DeepseekV32IndexerMetadataBuilder.arange_buffer |
1 | 0 | 0 |
attr |
DeepseekV32IndexerMetadataBuilder.expanded_seq_lens_buffer |
1 | 0 | 0 |
attr |
DeepseekV32IndexerMetadataBuilder.expanded_block_table_buffer |
1 | 0 | 0 |
attr |
DeepseekV32IndexerMetadataBuilder.scheduler_metadata_buffer |
1 | 0 | 0 |
vllm.v1.attention.backends.mla.rocm_aiter_mla (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AiterMLAImpl.__init__ |
12 | 11 | 0 |
meth |
AiterMLAImpl._flash_attn_varlen_diff_headdims |
7 | 0 | 0 |
attr |
AiterMLAImpl.flash_attn_varlen_func |
1 | 0 | 0 |
meth |
AiterMLAMetadataBuilder.__init__ |
5 | 4 | 0 |
attr |
AiterMLAMetadataBuilder.compilation_config |
1 | 0 | 0 |
attr |
AiterMLAMetadataBuilder.decode_attn_out_dtype |
1 | 0 | 0 |
attr |
AiterMLAMetadataBuilder.paged_kv_last_page_len |
1 | 0 | 0 |
attr |
AiterMLAMetadataBuilder.paged_kv_indptr |
1 | 0 | 0 |
attr |
AiterMLAMetadataBuilder.paged_kv_indices |
1 | 0 | 0 |
attr |
AiterMLAMetadataBuilder.qo_indptr |
1 | 0 | 0 |
vllm.v1.attention.backends.mla.rocm_aiter_mla_sparse (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
fetch_id_to_ragged_kernel |
8 | 3 | 0 |
func |
fetch_id_to_ragged_triton |
5 | 3 | 0 |
meth |
ROCMAiterMLASparseMetadataBuilder.__init__ |
5 | 4 | 0 |
meth |
ROCMAiterMLASparseImpl.__init__ |
14 | 13 | 0 |
attr |
ROCMAiterMLASparseImpl.num_heads |
1 | 0 | 0 |
attr |
ROCMAiterMLASparseImpl.head_size |
1 | 0 | 0 |
attr |
ROCMAiterMLASparseImpl.scale |
1 | 0 | 0 |
attr |
ROCMAiterMLASparseImpl.num_kv_heads |
1 | 0 | 0 |
attr |
ROCMAiterMLASparseImpl.kv_cache_dtype |
1 | 0 | 0 |
attr |
ROCMAiterMLASparseImpl.softmax_scale |
1 | 0 | 0 |
vllm.v1.attention.backends.mla.triton_mla (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
TritonMLAImpl.__init__ |
12 | 11 | 0 |
meth |
TritonMLAImpl._flash_attn_varlen_diff_headdims |
7 | 0 | 0 |
vllm.v1.attention.backends.registry (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.v1.attention.backends.rocm_aiter_fa (65 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
block_size |
3 | 0 | 0 |
func |
cp_mha_gather_cache |
14 | 13 | 0 |
meth |
AiterFlashAttentionMetadataBuilder.__init__ |
5 | 4 | 0 |
meth |
AiterFlashAttentionMetadataBuilder.build_for_cudagraph_capture |
2 | 1 | 0 |
meth |
AiterFlashAttentionMetadataBuilder.use_cascade_attention |
3 | 1 | 0 |
attr |
AiterFlashAttentionMetadataBuilder.model_config |
1 | 0 | 0 |
attr |
AiterFlashAttentionMetadataBuilder.parallel_config |
1 | 0 | 0 |
attr |
AiterFlashAttentionMetadataBuilder.cache_config |
1 | 0 | 0 |
attr |
AiterFlashAttentionMetadataBuilder.num_heads_q |
1 | 0 | 0 |
attr |
AiterFlashAttentionMetadataBuilder.num_heads_kv |
1 | 0 | 0 |
attr |
AiterFlashAttentionMetadataBuilder.headdim |
1 | 0 | 0 |
attr |
AiterFlashAttentionMetadataBuilder.block_size |
1 | 0 | 0 |
attr |
AiterFlashAttentionMetadataBuilder.extend_workspace |
1 | 0 | 0 |
attr |
AiterFlashAttentionMetadataBuilder.scale |
1 | 0 | 0 |
func |
reshape_and_cache_shuffle_kernel |
17 | 3 | 0 |
meth |
AiterFlashAttentionImpl.extend_for_sliding_window |
11 | 8 | 0 |
meth |
AiterFlashAttentionImpl.extend_forward |
16 | 15 | 0 |
meth |
AiterFlashAttentionImpl.do_kv_cache_update |
6 | 5 | 0 |
attr |
AiterFlashAttentionImpl.num_heads |
1 | 0 | 0 |
attr |
AiterFlashAttentionImpl.head_size |
1 | 0 | 0 |
attr |
AiterFlashAttentionImpl.scale |
1 | 0 | 0 |
attr |
AiterFlashAttentionImpl.num_kv_heads |
1 | 0 | 0 |
attr |
AiterFlashAttentionImpl.alibi_slopes |
1 | 0 | 0 |
attr |
AiterFlashAttentionImpl.kv_cache_dtype |
1 | 0 | 0 |
attr |
AiterFlashAttentionImpl.logits_soft_cap |
1 | 0 | 0 |
attr |
AiterFlashAttentionImpl.kv_sharing_target_layer_name |
1 | 0 | 0 |
attr |
AiterFlashAttentionImpl.num_queries_per_kv |
1 | 0 | 0 |
attr |
AiterFlashAttentionImpl.sliding_window |
1 | 0 | 0 |
func |
reshape_and_cache_shuffle_triton |
9 | 8 | 0 |
func |
cp_mha_gather_cache_kernel |
19 | 4 | 0 |
func |
num_programs |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.attention.backends.rocm_aiter_unified_attn (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RocmAiterUnifiedAttentionBackend.use_cascade_attention |
3 | 1 | 0 |
meth |
RocmAiterUnifiedAttentionImpl.fused_output_quant_supported |
2 | 1 | 0 |
meth |
RocmAiterUnifiedAttentionImpl.do_kv_cache_update |
6 | 5 | 0 |
meth |
RocmAiterUnifiedAttentionImpl.fused_rope_kvcache_supported |
1 | 0 | 0 |
meth |
RocmAiterUnifiedAttentionImpl.do_rope_and_kv_cache_update |
10 | 9 | 0 |
attr |
RocmAiterUnifiedAttentionImpl.unified_attention |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.attention.backends.rocm_attn (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RocmAttentionBackend.use_cascade_attention |
3 | 1 | 0 |
meth |
RocmAttentionMetadataBuilder.__init__ |
5 | 4 | 0 |
attr |
RocmAttentionMetadataBuilder.block_size |
1 | 0 | 0 |
attr |
RocmAttentionMetadataBuilder.num_heads_q |
1 | 0 | 0 |
attr |
RocmAttentionMetadataBuilder.num_heads_kv |
1 | 0 | 0 |
attr |
RocmAttentionMetadataBuilder.headdim |
1 | 0 | 0 |
meth |
RocmAttentionImpl.fused_output_quant_supported |
2 | 1 | 0 |
meth |
RocmAttentionImpl.do_kv_cache_update |
6 | 5 | 0 |
meth |
RocmAttentionImpl.fused_rope_kvcache_supported |
1 | 0 | 0 |
meth |
RocmAttentionImpl.do_rope_and_kv_cache_update |
10 | 9 | 0 |
attr |
RocmAttentionImpl.attn_type |
1 | 0 | 0 |
attr |
RocmAttentionImpl.num_heads |
1 | 0 | 0 |
attr |
RocmAttentionImpl.head_size |
1 | 0 | 0 |
attr |
RocmAttentionImpl.scale |
1 | 0 | 0 |
attr |
RocmAttentionImpl.num_kv_heads |
1 | 0 | 0 |
attr |
RocmAttentionImpl.alibi_slopes |
1 | 0 | 0 |
attr |
RocmAttentionImpl.kv_cache_dtype |
1 | 0 | 0 |
attr |
RocmAttentionImpl.logits_soft_cap |
1 | 0 | 0 |
attr |
RocmAttentionImpl.kv_sharing_target_layer_name |
1 | 0 | 0 |
attr |
RocmAttentionImpl.num_queries_per_kv |
1 | 0 | 0 |
attr |
RocmAttentionImpl.fp8_dtype |
1 | 0 | 0 |
attr |
RocmAttentionImpl.sinks |
1 | 0 | 0 |
attr |
RocmAttentionImpl.sliding_window |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.attention.backends.tree_attn (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TreeAttentionMetadataBuilder.__init__ |
5 | 4 | 0 |
attr |
TreeAttentionMetadataBuilder.block_size |
1 | 0 | 0 |
attr |
TreeAttentionMetadataBuilder.tree_attn_bias |
1 | 0 | 0 |
attr |
TreeAttentionMetadataBuilder.reorder_batch_threshold |
1 | 0 | 0 |
meth |
TreeAttentionBackend.use_cascade_attention |
3 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
TreeAttentionImpl.num_heads |
1 | 0 | 0 |
attr |
TreeAttentionImpl.head_size |
1 | 0 | 0 |
attr |
TreeAttentionImpl.scale |
1 | 0 | 0 |
attr |
TreeAttentionImpl.num_kv_heads |
1 | 0 | 0 |
attr |
TreeAttentionImpl.num_queries_per_kv |
1 | 0 | 0 |
attr |
TreeAttentionImpl.kv_cache_dtype |
1 | 0 | 0 |
attr |
TreeAttentionImpl.kv_sharing_target_layer_name |
1 | 0 | 0 |
attr |
TreeAttentionImpl.alibi_slopes |
1 | 0 | 0 |
attr |
TreeAttentionImpl.logits_soft_cap |
1 | 0 | 0 |
attr |
TreeAttentionImpl.sliding_window |
1 | 0 | 0 |
vllm.v1.attention.backends.triton_attn (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
TritonAttentionImpl.fused_output_quant_supported |
2 | 1 | 0 |
meth |
TritonAttentionImpl.do_kv_cache_update |
6 | 5 | 0 |
meth |
TritonAttentionImpl.fused_rope_kvcache_supported |
1 | 0 | 0 |
meth |
TritonAttentionImpl.do_rope_and_kv_cache_update |
10 | 9 | 0 |
attr |
TritonAttentionImpl.num_heads |
1 | 0 | 0 |
attr |
TritonAttentionImpl.head_size |
1 | 0 | 0 |
attr |
TritonAttentionImpl.scale |
1 | 0 | 0 |
attr |
TritonAttentionImpl.num_kv_heads |
1 | 0 | 0 |
attr |
TritonAttentionImpl.alibi_slopes |
1 | 0 | 0 |
attr |
TritonAttentionImpl.kv_cache_dtype |
1 | 0 | 0 |
attr |
TritonAttentionImpl.logits_soft_cap |
1 | 0 | 0 |
attr |
TritonAttentionImpl.kv_sharing_target_layer_name |
1 | 0 | 0 |
attr |
TritonAttentionImpl.num_queries_per_kv |
1 | 0 | 0 |
attr |
TritonAttentionImpl.attn_type |
1 | 0 | 0 |
attr |
TritonAttentionImpl.fp8_dtype |
1 | 0 | 0 |
attr |
TritonAttentionImpl.sinks |
1 | 0 | 0 |
attr |
TritonAttentionImpl.use_alibi_sqrt |
1 | 0 | 0 |
attr |
TritonAttentionImpl.supports_quant_query_input |
1 | 0 | 0 |
attr |
TritonAttentionImpl.sliding_window |
1 | 0 | 0 |
meth |
TritonAttentionMetadataBuilder.__init__ |
5 | 4 | 0 |
attr |
TritonAttentionMetadataBuilder.block_size |
1 | 0 | 0 |
attr |
TritonAttentionMetadataBuilder.num_heads_q |
1 | 0 | 0 |
attr |
TritonAttentionMetadataBuilder.num_heads_kv |
1 | 0 | 0 |
attr |
TritonAttentionMetadataBuilder.headdim |
1 | 0 | 0 |
attr |
TritonAttentionMetadataBuilder.decode_cudagraph_enabled |
1 | 0 | 0 |
attr |
TritonAttentionMetadataBuilder.seq_threshold_3D |
1 | 0 | 0 |
attr |
TritonAttentionMetadataBuilder.num_par_softmax_segments |
1 | 0 | 0 |
attr |
TritonAttentionMetadataBuilder.softmax_segm_output |
1 | 0 | 0 |
attr |
TritonAttentionMetadataBuilder.softmax_segm_max |
1 | 0 | 0 |
attr |
TritonAttentionMetadataBuilder.softmax_segm_expsum |
1 | 0 | 0 |
meth |
TritonAttentionBackend.use_cascade_attention |
3 | 1 | 0 |
vllm.v1.attention.backends.utils (4 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
compute_causal_conv1d_metadata |
3 | 2 | 0 |
func |
subclass_attention_metadata |
4 | 4 | 2 |
func |
set_kv_cache_layout |
2 | 1 | 0 |
func |
get_kv_cache_layout |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.attention.ops.chunked_prefill_paged_decode (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
chunked_prefill_paged_decode |
21 | 1 | 0 |
func |
cdiv_fn |
3 | 0 | 0 |
attr |
float8_info |
1 | 0 | 0 |
func |
kernel_paged_attention_2d |
43 | 29 | 0 |
vllm.v1.attention.ops.common (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CPTritonContext.__init__ |
1 | 0 | 0 |
meth |
CPTritonContext.call_kernel |
5 | 0 | 0 |
attr |
CPTritonContext.inner_kernel |
1 | 0 | 0 |
func |
cp_lse_ag_out_ar |
7 | 5 | 0 |
func |
cp_lse_ag_out_rs |
7 | 5 | 0 |
vllm.v1.attention.ops.flashmla (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.v1.attention.ops.prefix_prefill (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
NUM_WARPS |
1 | 0 | 0 |
attr |
float8_info |
1 | 0 | 0 |
attr |
BASE_BLOCK |
1 | 0 | 0 |
func |
context_attention_fwd |
22 | 4 | 0 |
attr |
IS_TURING |
1 | 0 | 0 |
vllm.v1.attention.ops.rocm_aiter_mla_sparse (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
cp_gather_indexer_k_quant_cache_triton |
9 | 8 | 0 |
func |
fp8_paged_mqa_logits_torch |
7 | 6 | 0 |
func |
indexer_k_quant_and_cache_triton |
8 | 3 | 0 |
vllm.v1.attention.ops.triton_decode_attention (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
decode_attention_fwd |
13 | 0 | 0 |
attr |
is_hip_ |
1 | 0 | 0 |
func |
decode_attention_fwd_grouped |
13 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
decode_attention_fwd_normal |
13 | 0 | 0 |
func |
tanh |
2 | 0 | 0 |
vllm.v1.attention.ops.triton_merge_attn_states (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
merge_attn_states_kernel |
12 | 3 | 0 |
vllm.v1.attention.ops.triton_prefill_attention (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
context_attention_fwd |
12 | 11 | 0 |
vllm.v1.attention.ops.triton_reshape_and_cache_flash (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
reshape_and_cache_kernel_flash |
22 | 14 | 0 |
func |
triton_reshape_and_cache_flash |
9 | 8 | 0 |
func |
reshape_and_cache_kernel_flash_diffkv |
17 | 10 | 0 |
func |
triton_reshape_and_cache_flash_diffkv |
8 | 7 | 0 |
vllm.v1.attention.ops.triton_unified_attention (85 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
kernel_unified_attention_3d |
48 | 30 | 0 |
attr |
is_batch_invariant |
1 | 0 | 0 |
func |
cdiv_fn |
3 | 0 | 0 |
attr |
float8_info |
1 | 0 | 0 |
func |
apply_softcap |
3 | 0 | 0 |
func |
find_seq_idx |
6 | 2 | 0 |
func |
unified_attention |
28 | 0 | 0 |
func |
kernel_unified_attention_2d |
51 | 34 | 0 |
func |
reduce_segments |
21 | 12 | 0 |
vllm.v1.attention.selector (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
AttentionSelectorConfig.__repr__ |
1 | 0 | 0 |
vllm.v1.core.block_pool (10 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlockPool.__init__ |
6 | 5 | 0 |
attr |
BlockPool.num_gpu_blocks |
1 | 0 | 0 |
attr |
BlockPool.enable_caching |
1 | 0 | 0 |
attr |
BlockPool.hash_block_size |
1 | 0 | 0 |
attr |
BlockPool.free_block_queue |
1 | 0 | 0 |
attr |
BlockPool.null_block |
1 | 0 | 0 |
attr |
BlockPool.enable_kv_cache_events |
1 | 0 | 0 |
attr |
BlockPool.metrics_collector |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
BlockHashToBlockMap.__init__ |
1 | 0 | 0 |
meth |
BlockHashToBlockMap._unexpected_blocks_type |
2 | 2 | 1 |
vllm.v1.core.encoder_cache_manager (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EncoderDecoderCacheManager.__init__ |
2 | 1 | 0 |
attr |
EncoderDecoderCacheManager.cache_size |
1 | 0 | 0 |
attr |
EncoderDecoderCacheManager.num_free_slots |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
EncoderCacheManager.__init__ |
2 | 1 | 0 |
attr |
EncoderCacheManager.cache_size |
1 | 0 | 0 |
attr |
EncoderCacheManager.num_free_slots |
1 | 0 | 0 |
attr |
EncoderCacheManager.num_freeable_slots |
1 | 0 | 0 |
vllm.v1.core.kv_cache_coordinator (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UnitaryKVCacheCoordinator.__init__ |
10 | 9 | 0 |
attr |
UnitaryKVCacheCoordinator.kv_cache_spec |
1 | 0 | 0 |
attr |
UnitaryKVCacheCoordinator.block_size |
1 | 0 | 0 |
attr |
UnitaryKVCacheCoordinator.dcp_world_size |
1 | 0 | 0 |
attr |
UnitaryKVCacheCoordinator.pcp_world_size |
1 | 0 | 0 |
meth |
KVCacheCoordinator.__init__ |
10 | 9 | 0 |
attr |
KVCacheCoordinator.kv_cache_config |
1 | 0 | 0 |
attr |
KVCacheCoordinator.max_model_len |
1 | 0 | 0 |
attr |
KVCacheCoordinator.enable_caching |
1 | 0 | 0 |
attr |
KVCacheCoordinator.block_pool |
1 | 0 | 0 |
attr |
KVCacheCoordinator.use_eagle |
1 | 0 | 0 |
attr |
KVCacheCoordinator.single_type_managers |
1 | 0 | 0 |
meth |
KVCacheCoordinatorNoPrefixCache.__init__ |
9 | 8 | 0 |
attr |
KVCacheCoordinatorNoPrefixCache.num_single_type_manager |
1 | 0 | 0 |
meth |
HybridKVCacheCoordinator.__init__ |
10 | 9 | 0 |
attr |
HybridKVCacheCoordinator.hash_block_size |
1 | 0 | 0 |
vllm.v1.core.kv_cache_manager (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
attr |
KVCacheManager.max_model_len |
1 | 0 | 0 |
attr |
KVCacheManager.enable_caching |
1 | 0 | 0 |
attr |
KVCacheManager.use_eagle |
1 | 0 | 0 |
attr |
KVCacheManager.log_stats |
1 | 0 | 0 |
attr |
KVCacheManager.metrics_collector |
1 | 0 | 0 |
attr |
KVCacheManager.prefix_cache_stats |
1 | 0 | 0 |
attr |
KVCacheManager.coordinator |
1 | 0 | 0 |
attr |
KVCacheManager.num_kv_cache_groups |
1 | 0 | 0 |
attr |
KVCacheManager.block_pool |
1 | 0 | 0 |
attr |
KVCacheManager.kv_cache_config |
1 | 0 | 0 |
attr |
KVCacheManager.empty_kv_cache_blocks |
1 | 0 | 0 |
vllm.v1.core.kv_cache_metrics (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KVCacheMetricsCollector.__init__ |
2 | 1 | 0 |
attr |
KVCacheMetricsCollector.sample_rate |
1 | 0 | 0 |
meth |
BlockMetricsState.__init__ |
1 | 0 | 0 |
attr |
BlockMetricsState.birth_time_ns |
1 | 0 | 0 |
attr |
BlockMetricsState.last_access_ns |
1 | 0 | 0 |
vllm.v1.core.kv_cache_utils (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KVCacheBlock.reset_hash |
1 | 0 | 0 |
meth |
BlockHashListWithBlockSize.__init__ |
4 | 3 | 0 |
attr |
BlockHashListWithBlockSize.block_hashes |
1 | 0 | 0 |
attr |
BlockHashListWithBlockSize.scale_factor |
1 | 0 | 0 |
func |
unify_hybrid_kv_cache_specs |
2 | 1 | 0 |
func |
check_enough_kv_cache_memory |
4 | 3 | 0 |
attr |
FreeKVCacheBlockQueue.num_free_blocks |
1 | 0 | 0 |
attr |
FreeKVCacheBlockQueue.fake_free_list_head |
1 | 0 | 0 |
attr |
FreeKVCacheBlockQueue.fake_free_list_tail |
1 | 0 | 0 |
func |
init_none_hash |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.core.sched.async_scheduler (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AsyncScheduler.__init__ |
3 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.core.sched.scheduler (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
Scheduler._free_blocks |
2 | 1 | 0 |
meth |
Scheduler._update_from_kv_xfer_finished |
2 | 1 | 0 |
attr |
Scheduler.vllm_config |
1 | 0 | 0 |
attr |
Scheduler.scheduler_config |
1 | 0 | 0 |
attr |
Scheduler.cache_config |
1 | 0 | 0 |
attr |
Scheduler.lora_config |
1 | 0 | 0 |
attr |
Scheduler.kv_cache_config |
1 | 0 | 0 |
attr |
Scheduler.kv_events_config |
1 | 0 | 0 |
attr |
Scheduler.parallel_config |
1 | 0 | 0 |
attr |
Scheduler.log_stats |
1 | 0 | 0 |
attr |
Scheduler.observability_config |
1 | 0 | 0 |
attr |
Scheduler.structured_output_manager |
1 | 0 | 0 |
attr |
Scheduler.is_encoder_decoder |
1 | 0 | 0 |
attr |
Scheduler.max_num_running_reqs |
1 | 0 | 0 |
attr |
Scheduler.max_num_scheduled_tokens |
1 | 0 | 0 |
attr |
Scheduler.max_model_len |
1 | 0 | 0 |
attr |
Scheduler.enable_kv_cache_events |
1 | 0 | 0 |
attr |
Scheduler.connector |
1 | 0 | 0 |
attr |
Scheduler.recompute_kv_load_failures |
1 | 0 | 0 |
attr |
Scheduler.kv_event_publisher |
1 | 0 | 0 |
attr |
Scheduler.ec_connector |
1 | 0 | 0 |
attr |
Scheduler.block_size |
1 | 0 | 0 |
attr |
Scheduler.dcp_world_size |
1 | 0 | 0 |
attr |
Scheduler.pcp_world_size |
1 | 0 | 0 |
attr |
Scheduler.waiting |
1 | 0 | 0 |
attr |
Scheduler.supports_mm_inputs |
1 | 0 | 0 |
attr |
Scheduler.mm_budget |
1 | 0 | 0 |
attr |
Scheduler.max_num_encoder_input_tokens |
1 | 0 | 0 |
attr |
Scheduler.encoder_cache_manager |
1 | 0 | 0 |
attr |
Scheduler.use_eagle |
1 | 0 | 0 |
attr |
Scheduler.num_spec_tokens |
1 | 0 | 0 |
attr |
Scheduler.num_lookahead_tokens |
1 | 0 | 0 |
attr |
Scheduler.kv_cache_manager |
1 | 0 | 0 |
attr |
Scheduler.use_pp |
1 | 0 | 0 |
attr |
Scheduler.use_v2_model_runner |
1 | 0 | 0 |
attr |
Scheduler.has_mamba_layers |
1 | 0 | 0 |
attr |
Scheduler.needs_kv_cache_zeroing |
1 | 0 | 0 |
attr |
Scheduler.need_mamba_block_aligned_split |
1 | 0 | 0 |
attr |
Scheduler.policy |
1 | 0 | 0 |
attr |
Scheduler.routed_experts_reader |
1 | 0 | 0 |
attr |
Scheduler.max_num_kv_tokens |
1 | 0 | 0 |
vllm.v1.core.single_type_kv_cache_manager (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SingleTypeKVCacheManager._get_num_evictable_blocks |
2 | 1 | 0 |
attr |
SingleTypeKVCacheManager.block_size |
1 | 0 | 0 |
attr |
SingleTypeKVCacheManager.dcp_world_size |
1 | 0 | 0 |
attr |
SingleTypeKVCacheManager.pcp_world_size |
1 | 0 | 0 |
attr |
SingleTypeKVCacheManager.kv_cache_spec |
1 | 0 | 0 |
attr |
SingleTypeKVCacheManager.block_pool |
1 | 0 | 0 |
attr |
SingleTypeKVCacheManager.enable_caching |
1 | 0 | 0 |
attr |
SingleTypeKVCacheManager.kv_cache_group_id |
1 | 0 | 0 |
meth |
SinkFullAttentionManager.__init__ |
7 | 6 | 0 |
attr |
SinkFullAttentionManager.sink_blocks |
1 | 0 | 0 |
meth |
SlidingWindowManager.__init__ |
3 | 2 | 0 |
attr |
SlidingWindowManager.sliding_window |
1 | 0 | 0 |
meth |
MambaManager.__init__ |
4 | 3 | 0 |
attr |
MambaManager.mamba_cache_mode |
1 | 0 | 0 |
meth |
ChunkedLocalAttentionManager.__init__ |
3 | 2 | 0 |
attr |
ChunkedLocalAttentionManager.attention_chunk_size |
1 | 0 | 0 |
func |
get_manager_for_kv_cache_spec |
3 | 2 | 0 |
vllm.v1.cudagraph_dispatcher (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CudagraphDispatcher.__init__ |
2 | 1 | 0 |
meth |
CudagraphDispatcher.add_cudagraph_key |
3 | 2 | 0 |
meth |
CudagraphDispatcher.initialize_cudagraph_keys |
3 | 2 | 0 |
attr |
CudagraphDispatcher.vllm_config |
1 | 0 | 0 |
attr |
CudagraphDispatcher.compilation_config |
1 | 0 | 0 |
attr |
CudagraphDispatcher.uniform_decode_query_len |
1 | 0 | 0 |
attr |
CudagraphDispatcher.keys_initialized |
1 | 0 | 0 |
attr |
CudagraphDispatcher.specialize_lora_count |
1 | 0 | 0 |
attr |
CudagraphDispatcher.cudagraph_mode |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.engine (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EngineCoreOutputs.__post_init__ |
1 | 0 | 0 |
meth |
FinishReason.__str__ |
1 | 0 | 0 |
vllm.v1.engine.async_llm (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AsyncLLM.__del__ |
1 | 0 | 0 |
meth |
AsyncLLM.shutdown |
1 | 0 | 0 |
meth |
AsyncLLM._add_request |
6 | 5 | 0 |
meth |
AsyncLLM._validate_streaming_input_sampling_params |
2 | 1 | 0 |
meth |
AsyncLLM._run_output_handler |
1 | 0 | 0 |
meth |
AsyncLLM.collective_rpc |
5 | 4 | 0 |
meth |
AsyncLLM.wait_for_requests_to_drain |
2 | 1 | 0 |
meth |
AsyncLLM.scale_elastic_ep |
3 | 2 | 0 |
attr |
AsyncLLM.vllm_config |
1 | 0 | 0 |
attr |
AsyncLLM.model_config |
1 | 0 | 0 |
attr |
AsyncLLM.observability_config |
1 | 0 | 0 |
attr |
AsyncLLM.log_requests |
1 | 0 | 0 |
attr |
AsyncLLM.log_stats |
1 | 0 | 0 |
attr |
AsyncLLM.renderer |
1 | 0 | 0 |
attr |
AsyncLLM.io_processor |
1 | 0 | 0 |
attr |
AsyncLLM.input_processor |
1 | 0 | 0 |
attr |
AsyncLLM.output_processor |
1 | 0 | 0 |
attr |
AsyncLLM.engine_core |
1 | 0 | 0 |
attr |
AsyncLLM.profiler |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
InputStreamError.__init__ |
2 | 1 | 0 |
attr |
InputStreamError.cause |
1 | 0 | 0 |
vllm.v1.engine.coordinator (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DPCoordinator.__init__ |
3 | 2 | 0 |
meth |
DPCoordinator.close |
1 | 0 | 0 |
attr |
DPCoordinator.stats_publish_address |
1 | 0 | 0 |
attr |
DPCoordinator.coord_in_address |
1 | 0 | 0 |
attr |
DPCoordinator.coord_out_address |
1 | 0 | 0 |
meth |
DPCoordinatorProc.__init__ |
4 | 3 | 0 |
meth |
DPCoordinatorProc.run_coordinator |
7 | 6 | 0 |
meth |
DPCoordinatorProc.process_input_socket |
4 | 3 | 0 |
meth |
DPCoordinatorProc._send_start_wave |
4 | 3 | 0 |
meth |
DPCoordinatorProc._get_engine_counts |
2 | 1 | 0 |
attr |
DPCoordinatorProc.ctx |
1 | 0 | 0 |
attr |
DPCoordinatorProc.engines |
1 | 0 | 0 |
attr |
DPCoordinatorProc.stats_update_interval_ms |
1 | 0 | 0 |
attr |
DPCoordinatorProc.enable_wave_coordination |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
EngineState.__init__ |
1 | 0 | 0 |
attr |
EngineState.request_counts |
1 | 0 | 0 |
vllm.v1.engine.core (75 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EngineCoreActorMixin.__init__ |
5 | 4 | 0 |
meth |
EngineCoreActorMixin._set_visible_devices |
3 | 2 | 0 |
meth |
EngineCoreActorMixin._set_cuda_visible_devices |
4 | 3 | 0 |
meth |
EngineCoreActorMixin._perform_handshakes |
6 | 5 | 0 |
meth |
EngineCoreActorMixin.wait_for_init |
1 | 0 | 0 |
meth |
EngineCoreActorMixin.run |
1 | 0 | 0 |
attr |
EngineCoreActorMixin.addresses |
1 | 0 | 0 |
meth |
DPMoEEngineCoreActor.__init__ |
8 | 7 | 0 |
meth |
DPEngineCoreProc.__init__ |
7 | 6 | 0 |
meth |
DPEngineCoreProc._init_data_parallel |
2 | 1 | 0 |
meth |
DPEngineCoreProc.shutdown |
1 | 0 | 0 |
meth |
DPEngineCoreProc.add_request |
3 | 2 | 0 |
meth |
DPEngineCoreProc.resume_scheduler |
1 | 0 | 0 |
meth |
DPEngineCoreProc._handle_client_request |
3 | 3 | 1 |
meth |
DPEngineCoreProc._maybe_publish_request_counts |
1 | 0 | 0 |
meth |
DPEngineCoreProc.run_busy_loop |
1 | 0 | 0 |
meth |
DPEngineCoreProc._eep_send_engine_core_notification |
3 | 2 | 0 |
meth |
DPEngineCoreProc.eep_handle_engine_core_notification |
2 | 1 | 0 |
meth |
DPEngineCoreProc._eep_scale_up_before_kv_init |
1 | 0 | 0 |
attr |
DPEngineCoreProc.step_counter |
1 | 0 | 0 |
attr |
DPEngineCoreProc.current_wave |
1 | 0 | 0 |
attr |
DPEngineCoreProc.last_counts |
1 | 0 | 0 |
meth |
EngineCoreActor.__init__ |
8 | 7 | 0 |
meth |
EngineCore.__init__ |
6 | 5 | 0 |
meth |
EngineCore.add_request |
3 | 2 | 0 |
meth |
EngineCore.abort_requests |
2 | 1 | 0 |
meth |
EngineCore.log_error_detail |
2 | 1 | 0 |
meth |
EngineCore.log_iteration_details |
2 | 1 | 0 |
meth |
EngineCore._process_aborts_queue |
1 | 0 | 0 |
meth |
EngineCore.shutdown |
1 | 0 | 0 |
meth |
EngineCore.profile |
3 | 2 | 0 |
meth |
EngineCore.reset_mm_cache |
1 | 0 | 0 |
meth |
EngineCore._reset_caches |
2 | 1 | 0 |
meth |
EngineCore.wake_up |
2 | 1 | 0 |
meth |
EngineCore.execute_dummy_batch |
1 | 0 | 0 |
meth |
EngineCore._eep_scale_up_before_kv_init |
1 | 0 | 0 |
meth |
EngineCore._eep_send_engine_core_notification |
3 | 2 | 0 |
attr |
EngineCore.vllm_config |
1 | 0 | 0 |
attr |
EngineCore.log_stats |
1 | 0 | 0 |
attr |
EngineCore.model_executor |
1 | 0 | 0 |
attr |
EngineCore.available_gpu_memory_for_kv_cache |
1 | 0 | 0 |
attr |
EngineCore.structured_output_manager |
1 | 0 | 0 |
attr |
EngineCore.use_spec_decode |
1 | 0 | 0 |
attr |
EngineCore.mm_registry |
1 | 0 | 0 |
attr |
EngineCore.mm_receiver_cache |
1 | 0 | 0 |
attr |
EngineCore.batch_queue_size |
1 | 0 | 0 |
attr |
EngineCore.is_ec_producer |
1 | 0 | 0 |
attr |
EngineCore.is_pooling_model |
1 | 0 | 0 |
attr |
EngineCore.step_fn |
1 | 0 | 0 |
attr |
EngineCore.async_scheduling |
1 | 0 | 0 |
attr |
EngineCore.aborts_queue |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
EngineCoreProc.__init__ |
8 | 7 | 0 |
meth |
EngineCoreProc.run_engine_core |
5 | 2 | 0 |
meth |
EngineCoreProc._init_data_parallel |
2 | 1 | 0 |
meth |
EngineCoreProc.run_busy_loop |
1 | 0 | 0 |
meth |
EngineCoreProc._process_input_queue |
1 | 0 | 0 |
meth |
EngineCoreProc._handle_client_request |
3 | 3 | 1 |
meth |
EngineCoreProc._invoke_utility_method |
5 | 4 | 0 |
meth |
EngineCoreProc._convert_msgspec_args |
3 | 0 | 0 |
meth |
EngineCoreProc._send_engine_dead |
1 | 0 | 0 |
meth |
EngineCoreProc.process_input_sockets |
5 | 4 | 0 |
meth |
EngineCoreProc.process_output_sockets |
4 | 3 | 0 |
attr |
EngineCoreProc.input_queue |
1 | 0 | 0 |
attr |
EngineCoreProc.output_queue |
1 | 0 | 0 |
attr |
EngineCoreProc.engine_index |
1 | 0 | 0 |
attr |
EngineCoreProc.engines_running |
1 | 0 | 0 |
attr |
EngineCoreProc.client_count |
1 | 0 | 0 |
attr |
EngineCoreProc.has_coordinator |
1 | 0 | 0 |
attr |
EngineCoreProc.frontend_stats_publish_address |
1 | 0 | 0 |
attr |
EngineCoreProc.publish_dp_lb_stats |
1 | 0 | 0 |
attr |
EngineCoreProc.process_input_queue_block |
1 | 0 | 0 |
attr |
EngineCoreProc.output_thread |
1 | 0 | 0 |
vllm.v1.engine.core_client (50 missing, 8 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SyncMPClient.__init__ |
4 | 3 | 0 |
meth |
SyncMPClient._send_input |
3 | 2 | 1 |
meth |
SyncMPClient.call_utility |
3 | 2 | 1 |
attr |
SyncMPClient.is_dp |
1 | 0 | 0 |
attr |
SyncMPClient.outputs_queue |
1 | 0 | 0 |
attr |
SyncMPClient.output_queue_thread |
1 | 0 | 0 |
meth |
InprocClient.__init__ |
3 | 0 | 0 |
attr |
InprocClient.engine_core |
1 | 0 | 0 |
meth |
MPClient.__init__ |
6 | 5 | 0 |
meth |
MPClient.shutdown |
1 | 0 | 0 |
meth |
MPClient.ensure_alive |
1 | 0 | 0 |
meth |
MPClient.add_pending_message |
3 | 2 | 1 |
meth |
MPClient.free_pending_messages |
1 | 0 | 0 |
meth |
MPClient.start_engine_core_monitor |
1 | 0 | 0 |
attr |
MPClient.vllm_config |
1 | 0 | 0 |
attr |
MPClient.encoder |
1 | 0 | 0 |
attr |
MPClient.decoder |
1 | 0 | 0 |
attr |
MPClient.ctx |
1 | 0 | 0 |
attr |
MPClient.resources |
1 | 0 | 0 |
attr |
MPClient.engines_running |
1 | 0 | 0 |
attr |
MPClient.engine_ranks_managed |
1 | 0 | 0 |
attr |
MPClient.pending_messages |
1 | 0 | 0 |
attr |
MPClient.input_socket |
1 | 0 | 0 |
func |
allocate_stateless_group_ports |
3 | 1 | 0 |
meth |
BackgroundResources.__call__ |
1 | 0 | 0 |
meth |
BackgroundResources.validate_alive |
2 | 1 | 0 |
meth |
EngineCoreClient.shutdown |
1 | 0 | 0 |
meth |
DPAsyncMPClient.__init__ |
7 | 6 | 0 |
meth |
DPAsyncMPClient._ensure_stats_update_task |
1 | 0 | 0 |
meth |
DPAsyncMPClient.get_core_engine_for_request |
2 | 1 | 0 |
attr |
DPAsyncMPClient.current_wave |
1 | 0 | 0 |
attr |
DPAsyncMPClient.first_req_sock_addr |
1 | 0 | 0 |
attr |
DPAsyncMPClient.first_req_send_socket |
1 | 0 | 0 |
meth |
DPLBAsyncMPClient.__init__ |
7 | 6 | 0 |
meth |
DPLBAsyncMPClient.call_utility_async |
3 | 2 | 1 |
meth |
DPLBAsyncMPClient.process_engine_outputs |
3 | 2 | 0 |
meth |
DPLBAsyncMPClient.eep_process_engine_core_notification |
3 | 2 | 0 |
attr |
DPLBAsyncMPClient.client_count |
1 | 0 | 0 |
attr |
DPLBAsyncMPClient.eng_start_index |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
AsyncMPClient.__init__ |
7 | 6 | 0 |
meth |
AsyncMPClient._ensure_output_queue_task |
1 | 0 | 0 |
meth |
AsyncMPClient._send_input |
4 | 4 | 1 |
meth |
AsyncMPClient._send_input_message |
4 | 4 | 1 |
meth |
AsyncMPClient.call_utility_async |
3 | 2 | 1 |
meth |
AsyncMPClient._call_utility_async |
4 | 3 | 1 |
attr |
AsyncMPClient.client_count |
1 | 0 | 0 |
attr |
AsyncMPClient.client_index |
1 | 0 | 0 |
attr |
AsyncMPClient.outputs_queue |
1 | 0 | 0 |
vllm.v1.engine.detokenizer (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SlowIncrementalDetokenizer.__init__ |
3 | 2 | 0 |
attr |
SlowIncrementalDetokenizer.tokenizer |
1 | 0 | 0 |
attr |
SlowIncrementalDetokenizer.prompt_len |
1 | 0 | 0 |
attr |
SlowIncrementalDetokenizer.skip_special_tokens |
1 | 0 | 0 |
attr |
SlowIncrementalDetokenizer.spaces_between_special_tokens |
1 | 0 | 0 |
attr |
SlowIncrementalDetokenizer.tokens |
1 | 0 | 0 |
attr |
SlowIncrementalDetokenizer.prefix_offset |
1 | 0 | 0 |
attr |
SlowIncrementalDetokenizer.read_offset |
1 | 0 | 0 |
meth |
IncrementalDetokenizer.__init__ |
1 | 0 | 0 |
attr |
USE_FAST_DETOKENIZER |
1 | 0 | 0 |
meth |
BaseIncrementalDetokenizer.__init__ |
2 | 1 | 0 |
attr |
BaseIncrementalDetokenizer.stop |
1 | 0 | 0 |
attr |
BaseIncrementalDetokenizer.min_tokens |
1 | 0 | 0 |
attr |
BaseIncrementalDetokenizer.include_stop_str_in_output |
1 | 0 | 0 |
attr |
BaseIncrementalDetokenizer.output_text |
1 | 0 | 0 |
attr |
BaseIncrementalDetokenizer.stop_buffer_length |
1 | 0 | 0 |
meth |
FastIncrementalDetokenizer.__init__ |
3 | 2 | 0 |
attr |
FastIncrementalDetokenizer.request_id |
1 | 0 | 0 |
attr |
FastIncrementalDetokenizer.skip_special_tokens |
1 | 0 | 0 |
attr |
FastIncrementalDetokenizer.stream |
1 | 0 | 0 |
attr |
FastIncrementalDetokenizer.spaces_between_special_tokens |
1 | 0 | 0 |
attr |
FastIncrementalDetokenizer.last_special |
1 | 0 | 0 |
attr |
FastIncrementalDetokenizer.added_token_ids |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.engine.exceptions (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EngineDeadError.__init__ |
4 | 1 | 0 |
vllm.v1.engine.input_processor (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InputProcessor.assign_request_id |
2 | 1 | 0 |
meth |
InputProcessor._validate_prompt_len |
3 | 2 | 0 |
meth |
InputProcessor._validate_model_inputs |
3 | 2 | 0 |
attr |
InputProcessor.vllm_config |
1 | 0 | 0 |
attr |
InputProcessor.model_config |
1 | 0 | 0 |
attr |
InputProcessor.cache_config |
1 | 0 | 0 |
attr |
InputProcessor.lora_config |
1 | 0 | 0 |
attr |
InputProcessor.scheduler_config |
1 | 0 | 0 |
attr |
InputProcessor.speculative_config |
1 | 0 | 0 |
attr |
InputProcessor.structured_outputs_config |
1 | 0 | 0 |
attr |
InputProcessor.observability_config |
1 | 0 | 0 |
attr |
InputProcessor.generation_config_fields |
1 | 0 | 0 |
attr |
InputProcessor.renderer |
1 | 0 | 0 |
attr |
InputProcessor.supports_mm_inputs |
1 | 0 | 0 |
attr |
InputProcessor.mm_encoder_cache_size |
1 | 0 | 0 |
attr |
InputProcessor.skip_prompt_length_check |
1 | 0 | 0 |
attr |
InputProcessor.input_preprocessor |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.engine.llm_engine (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LLMEngine.start_profile |
2 | 1 | 0 |
meth |
LLMEngine.stop_profile |
1 | 0 | 0 |
meth |
LLMEngine.reset_mm_cache |
1 | 0 | 0 |
meth |
LLMEngine.sleep |
3 | 2 | 0 |
meth |
LLMEngine.wake_up |
2 | 1 | 0 |
meth |
LLMEngine.__del__ |
1 | 0 | 0 |
attr |
LLMEngine.vllm_config |
1 | 0 | 0 |
attr |
LLMEngine.model_config |
1 | 0 | 0 |
attr |
LLMEngine.observability_config |
1 | 0 | 0 |
attr |
LLMEngine.log_stats |
1 | 0 | 0 |
attr |
LLMEngine.external_launcher_dp |
1 | 0 | 0 |
attr |
LLMEngine.should_execute_dummy_batch |
1 | 0 | 0 |
attr |
LLMEngine.renderer |
1 | 0 | 0 |
attr |
LLMEngine.io_processor |
1 | 0 | 0 |
attr |
LLMEngine.input_processor |
1 | 0 | 0 |
attr |
LLMEngine.output_processor |
1 | 0 | 0 |
attr |
LLMEngine.engine_core |
1 | 0 | 0 |
attr |
LLMEngine.dp_group |
1 | 0 | 0 |
attr |
LLMEngine.model_executor |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.engine.logprobs (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
attr |
NONES |
1 | 0 | 0 |
vllm.v1.engine.output_processor (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RequestOutputCollector.__init__ |
3 | 2 | 0 |
meth |
RequestOutputCollector.close |
1 | 0 | 0 |
meth |
RequestOutputCollector.__del__ |
1 | 0 | 0 |
attr |
RequestOutputCollector.aggregate |
1 | 0 | 0 |
attr |
RequestOutputCollector.request_id |
1 | 0 | 0 |
attr |
RequestOutputCollector.ready |
1 | 0 | 0 |
meth |
RequestState.__init__ |
21 | 20 | 0 |
attr |
RequestState.request_id |
1 | 0 | 0 |
attr |
RequestState.external_req_id |
1 | 0 | 0 |
attr |
RequestState.parent_req |
1 | 0 | 0 |
attr |
RequestState.request_index |
1 | 0 | 0 |
attr |
RequestState.lora_request |
1 | 0 | 0 |
attr |
RequestState.lora_name |
1 | 0 | 0 |
attr |
RequestState.output_kind |
1 | 0 | 0 |
attr |
RequestState.prompt |
1 | 0 | 0 |
attr |
RequestState.prompt_token_ids |
1 | 0 | 0 |
attr |
RequestState.prompt_embeds |
1 | 0 | 0 |
attr |
RequestState.prompt_len |
1 | 0 | 0 |
attr |
RequestState.logprobs_processor |
1 | 0 | 0 |
attr |
RequestState.detokenizer |
1 | 0 | 0 |
attr |
RequestState.max_tokens_param |
1 | 0 | 0 |
attr |
RequestState.top_p |
1 | 0 | 0 |
attr |
RequestState.n |
1 | 0 | 0 |
attr |
RequestState.temperature |
1 | 0 | 0 |
attr |
RequestState.is_prefilling |
1 | 0 | 0 |
attr |
RequestState.queue |
1 | 0 | 0 |
attr |
RequestState.num_cached_tokens |
1 | 0 | 0 |
attr |
RequestState.stats |
1 | 0 | 0 |
attr |
RequestState.stream_interval |
1 | 0 | 0 |
attr |
RequestState.sent_tokens_offset |
1 | 0 | 0 |
attr |
RequestState.streaming_input |
1 | 0 | 0 |
attr |
EMPTY_CPU_TENSOR |
1 | 0 | 0 |
meth |
OutputProcessor.__init__ |
5 | 4 | 0 |
meth |
OutputProcessor.get_num_unfinished_requests |
1 | 0 | 0 |
meth |
OutputProcessor.propagate_error |
2 | 1 | 0 |
meth |
OutputProcessor.update_scheduler_stats |
2 | 1 | 0 |
meth |
OutputProcessor._update_stats_from_output |
5 | 4 | 0 |
meth |
OutputProcessor._update_stats_from_finished |
4 | 3 | 0 |
attr |
OutputProcessor.log_stats |
1 | 0 | 0 |
attr |
OutputProcessor.tokenizer |
1 | 0 | 0 |
attr |
OutputProcessor.stream_interval |
1 | 0 | 0 |
attr |
OutputProcessor.lora_states |
1 | 0 | 0 |
attr |
OutputProcessor.tracing_enabled |
1 | 0 | 0 |
vllm.v1.engine.parallel_sampling (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ParentRequest.observe_num_generation_tokens |
2 | 1 | 0 |
meth |
ParentRequest.observe_finished_request |
4 | 3 | 0 |
vllm.v1.engine.utils (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CoreEngineActorManager.__init__ |
7 | 6 | 0 |
meth |
CoreEngineActorManager.get_run_refs |
1 | 0 | 0 |
meth |
CoreEngineActorManager.close |
1 | 0 | 0 |
attr |
CoreEngineActorManager.env_vars_dict |
1 | 0 | 0 |
attr |
CoreEngineActorManager.addresses |
1 | 0 | 0 |
attr |
CoreEngineActorManager.executor_class |
1 | 0 | 0 |
attr |
CoreEngineActorManager.log_stats |
1 | 0 | 0 |
attr |
CoreEngineActorManager.placement_group_is_local |
1 | 0 | 0 |
attr |
CoreEngineActorManager.run_refs |
1 | 0 | 0 |
attr |
CoreEngineActorManager.created_placement_groups |
1 | 0 | 0 |
meth |
CoreEngineProcManager.__init__ |
11 | 10 | 0 |
meth |
CoreEngineProcManager.close |
1 | 0 | 0 |
meth |
CoreEngineProcManager.join_first |
1 | 0 | 0 |
func |
get_device_indices |
5 | 4 | 0 |
meth |
CoreEngine.__init__ |
3 | 2 | 0 |
attr |
CoreEngine.local |
1 | 0 | 0 |
attr |
CoreEngine.identity |
1 | 0 | 0 |
attr |
CoreEngine.state |
1 | 0 | 0 |
func |
wait_for_engine_startup |
9 | 8 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.executor.abstract (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Executor.register_failure_callback |
2 | 1 | 0 |
meth |
Executor.profile |
3 | 2 | 0 |
meth |
Executor.sleep |
2 | 1 | 0 |
meth |
Executor.wake_up |
2 | 1 | 0 |
attr |
Executor.vllm_config |
1 | 0 | 0 |
attr |
Executor.model_config |
1 | 0 | 0 |
attr |
Executor.cache_config |
1 | 0 | 0 |
attr |
Executor.lora_config |
1 | 0 | 0 |
attr |
Executor.load_config |
1 | 0 | 0 |
attr |
Executor.parallel_config |
1 | 0 | 0 |
attr |
Executor.scheduler_config |
1 | 0 | 0 |
attr |
Executor.device_config |
1 | 0 | 0 |
attr |
Executor.speculative_config |
1 | 0 | 0 |
attr |
Executor.observability_config |
1 | 0 | 0 |
attr |
Executor.is_sleeping |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.executor.multiproc_executor (28 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
MultiprocExecutor.__init__ |
3 | 2 | 0 |
meth |
MultiprocExecutor.start_worker_monitor |
2 | 1 | 0 |
meth |
MultiprocExecutor.register_failure_callback |
2 | 1 | 0 |
meth |
MultiprocExecutor.collective_rpc |
8 | 8 | 1 |
meth |
MultiprocExecutor._ensure_worker_termination |
2 | 1 | 0 |
meth |
MultiprocExecutor.shutdown |
1 | 0 | 0 |
attr |
MultiprocExecutor.monitor_workers |
1 | 0 | 0 |
meth |
WorkerProc.__init__ |
8 | 7 | 0 |
meth |
WorkerProc.make_worker_process |
8 | 7 | 0 |
meth |
WorkerProc.shutdown |
1 | 0 | 0 |
meth |
WorkerProc.worker_main |
3 | 0 | 0 |
meth |
WorkerProc.enqueue_output |
2 | 1 | 1 |
meth |
WorkerProc.handle_output |
2 | 1 | 1 |
meth |
WorkerProc.async_output_busy_loop |
1 | 0 | 0 |
meth |
WorkerProc.worker_busy_loop |
2 | 1 | 0 |
attr |
WorkerProc.rank |
1 | 0 | 0 |
attr |
WorkerProc.worker |
1 | 0 | 0 |
attr |
WorkerProc.use_async_scheduling |
1 | 0 | 0 |
attr |
WorkerProc.async_output_copy_thread |
1 | 0 | 0 |
func |
set_multiprocessing_worker_envs |
1 | 0 | 0 |
meth |
FutureWrapper.__init__ |
3 | 2 | 0 |
meth |
FutureWrapper.result |
2 | 0 | 0 |
meth |
FutureWrapper.wait_for_response |
2 | 1 | 0 |
attr |
FutureWrapper.futures_queue |
1 | 0 | 0 |
attr |
FutureWrapper.aggregate |
1 | 0 | 0 |
vllm.v1.executor.ray_executor (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RayDistributedExecutor._configure_ray_workers_use_nsight |
2 | 1 | 0 |
meth |
RayDistributedExecutor._update_noset_device_env_vars |
2 | 0 | 0 |
meth |
RayDistributedExecutor._get_env_vars_to_be_updated |
1 | 0 | 0 |
meth |
RayDistributedExecutor._init_workers_ray |
3 | 1 | 0 |
meth |
RayDistributedExecutor._check_ray_cgraph_installation |
1 | 0 | 0 |
meth |
RayDistributedExecutor._compiled_ray_dag |
2 | 1 | 0 |
meth |
RayDistributedExecutor.__del__ |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.executor.ray_utils (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
initialize_ray_cluster |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
assert_ray_available |
1 | 0 | 0 |
meth |
FutureWrapper.__init__ |
3 | 1 | 0 |
meth |
FutureWrapper.result |
2 | 0 | 0 |
attr |
FutureWrapper.ref_or_refs |
1 | 0 | 0 |
attr |
FutureWrapper.aggregator |
1 | 0 | 0 |
meth |
RayWorkerWrapper.__init__ |
3 | 1 | 0 |
meth |
RayWorkerWrapper.setup_device_if_necessary |
1 | 0 | 0 |
meth |
RayWorkerWrapper.override_env_vars |
2 | 1 | 0 |
meth |
RayWorkerWrapper._is_intermediate_tensors |
2 | 1 | 0 |
attr |
RayWorkerWrapper.compiled_dag_cuda_device_set |
1 | 0 | 0 |
vllm.v1.executor.uniproc_executor (1 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UniProcExecutor.collective_rpc |
7 | 7 | 1 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.kv_cache_interface (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
FullAttentionSpec.__post_init__ |
1 | 0 | 0 |
vllm.v1.kv_offload.abstract (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OffloadingManager.touch |
2 | 1 | 0 |
meth |
OffloadingManager.complete_load |
2 | 1 | 0 |
meth |
OffloadingManager.complete_store |
3 | 2 | 0 |
vllm.v1.kv_offload.arc_manager (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ARCOffloadingManager.__init__ |
3 | 2 | 0 |
meth |
ARCOffloadingManager.touch |
2 | 1 | 0 |
meth |
ARCOffloadingManager.complete_load |
2 | 1 | 0 |
meth |
ARCOffloadingManager.complete_store |
3 | 2 | 0 |
vllm.v1.kv_offload.backend (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlockStatus.__init__ |
1 | 0 | 0 |
attr |
BlockStatus.ref_cnt |
1 | 0 | 0 |
meth |
Backend.__init__ |
3 | 2 | 0 |
meth |
Backend.get_num_free_blocks |
1 | 0 | 0 |
meth |
Backend.free |
2 | 1 | 0 |
attr |
Backend.block_size |
1 | 0 | 0 |
attr |
Backend.medium |
1 | 0 | 0 |
vllm.v1.kv_offload.backends.cpu (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CPUBlockStatus.__init__ |
2 | 1 | 0 |
attr |
CPUBlockStatus.block_id |
1 | 0 | 0 |
meth |
CPUBackend.__init__ |
3 | 2 | 0 |
meth |
CPUBackend.get_num_free_blocks |
1 | 0 | 0 |
meth |
CPUBackend.free |
2 | 1 | 0 |
vllm.v1.kv_offload.cpu (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CPUOffloadingSpec.__init__ |
3 | 2 | 0 |
attr |
CPUOffloadingSpec.num_blocks |
1 | 0 | 0 |
vllm.v1.kv_offload.factory (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.v1.kv_offload.lru_manager (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LRUOffloadingManager.__init__ |
3 | 2 | 0 |
meth |
LRUOffloadingManager.touch |
2 | 1 | 0 |
meth |
LRUOffloadingManager.complete_load |
2 | 1 | 0 |
meth |
LRUOffloadingManager.complete_store |
3 | 2 | 0 |
vllm.v1.kv_offload.mediums (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlockIDsLoadStoreSpec.__init__ |
2 | 1 | 0 |
attr |
BlockIDsLoadStoreSpec.block_ids |
1 | 0 | 0 |
vllm.v1.kv_offload.spec (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
OffloadingSpec.__init__ |
3 | 2 | 0 |
attr |
OffloadingSpec.vllm_config |
1 | 0 | 0 |
attr |
OffloadingSpec.kv_cache_config |
1 | 0 | 0 |
attr |
OffloadingSpec.extra_config |
1 | 0 | 0 |
attr |
OffloadingSpec.gpu_block_size |
1 | 0 | 0 |
attr |
OffloadingSpec.offloaded_block_size |
1 | 0 | 0 |
vllm.v1.kv_offload.worker.cpu_gpu (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
expand_block_ids |
5 | 4 | 0 |
meth |
CpuGpuOffloadingHandlers.__init__ |
6 | 5 | 0 |
attr |
CpuGpuOffloadingHandlers.gpu_to_cpu_handler |
1 | 0 | 0 |
attr |
CpuGpuOffloadingHandlers.cpu_to_gpu_handler |
1 | 0 | 0 |
meth |
SingleDirectionOffloadingHandler.__init__ |
5 | 4 | 0 |
meth |
SingleDirectionOffloadingHandler.wait |
2 | 1 | 0 |
attr |
SingleDirectionOffloadingHandler.block_size_in_bytes |
1 | 0 | 0 |
attr |
SingleDirectionOffloadingHandler.total_block_size_in_bytes |
1 | 0 | 0 |
attr |
SingleDirectionOffloadingHandler.transfer_type |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.kv_offload.worker.worker (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OffloadingWorker.__init__ |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.metrics.loggers (98 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
StatLoggerBase.__init__ |
3 | 2 | 0 |
meth |
StatLoggerBase.record |
5 | 4 | 0 |
meth |
StatLoggerBase.log_engine_initialized |
1 | 0 | 0 |
meth |
StatLoggerBase.log |
1 | 0 | 0 |
meth |
StatLoggerBase.record_sleep_state |
3 | 2 | 0 |
meth |
PerEngineStatLoggerAdapter.record |
5 | 4 | 0 |
meth |
PerEngineStatLoggerAdapter.log |
1 | 0 | 0 |
meth |
PerEngineStatLoggerAdapter.log_engine_initialized |
1 | 0 | 0 |
attr |
PerEngineStatLoggerAdapter.per_engine_stat_loggers |
1 | 0 | 0 |
attr |
PerEngineStatLoggerAdapter.engine_indexes |
1 | 0 | 0 |
meth |
PrometheusStatLogger.__init__ |
3 | 2 | 0 |
meth |
PrometheusStatLogger.log_metrics_info |
3 | 2 | 0 |
meth |
PrometheusStatLogger.record |
5 | 4 | 0 |
meth |
PrometheusStatLogger.record_sleep_state |
3 | 2 | 0 |
meth |
PrometheusStatLogger.log_engine_initialized |
1 | 0 | 0 |
attr |
PrometheusStatLogger.engine_indexes |
1 | 0 | 0 |
attr |
PrometheusStatLogger.vllm_config |
1 | 0 | 0 |
attr |
PrometheusStatLogger.show_hidden_metrics |
1 | 0 | 0 |
attr |
PrometheusStatLogger.kv_cache_metrics_enabled |
1 | 0 | 0 |
attr |
PrometheusStatLogger.spec_decoding_prom |
1 | 0 | 0 |
attr |
PrometheusStatLogger.kv_connector_prom |
1 | 0 | 0 |
attr |
PrometheusStatLogger.perf_metrics_prom |
1 | 0 | 0 |
attr |
PrometheusStatLogger.gauge_scheduler_running |
1 | 0 | 0 |
attr |
PrometheusStatLogger.gauge_scheduler_waiting |
1 | 0 | 0 |
attr |
PrometheusStatLogger.gauge_engine_sleep_state |
1 | 0 | 0 |
attr |
PrometheusStatLogger.gauge_kv_cache_usage |
1 | 0 | 0 |
attr |
PrometheusStatLogger.counter_prefix_cache_queries |
1 | 0 | 0 |
attr |
PrometheusStatLogger.counter_prefix_cache_hits |
1 | 0 | 0 |
attr |
PrometheusStatLogger.counter_connector_prefix_cache_queries |
1 | 0 | 0 |
attr |
PrometheusStatLogger.counter_connector_prefix_cache_hits |
1 | 0 | 0 |
attr |
PrometheusStatLogger.counter_mm_cache_queries |
1 | 0 | 0 |
attr |
PrometheusStatLogger.counter_mm_cache_hits |
1 | 0 | 0 |
attr |
PrometheusStatLogger.counter_num_preempted_reqs |
1 | 0 | 0 |
attr |
PrometheusStatLogger.counter_prompt_tokens |
1 | 0 | 0 |
attr |
PrometheusStatLogger.counter_prompt_tokens_cached |
1 | 0 | 0 |
attr |
PrometheusStatLogger.counter_prompt_tokens_recomputed |
1 | 0 | 0 |
attr |
PrometheusStatLogger.counter_generation_tokens |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_num_prompt_tokens_request |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_num_generation_tokens_request |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_iteration_tokens |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_max_num_generation_tokens_request |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_n_request |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_max_tokens_request |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_time_to_first_token |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_inter_token_latency |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_request_time_per_output_token |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_e2e_time_request |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_queue_time_request |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_inference_time_request |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_prefill_time_request |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_decode_time_request |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_prefill_kv_computed_request |
1 | 0 | 0 |
attr |
PrometheusStatLogger.counter_corrupted_requests |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_kv_block_lifetime |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_kv_block_idle_before_evict |
1 | 0 | 0 |
attr |
PrometheusStatLogger.histogram_kv_block_reuse_gap |
1 | 0 | 0 |
attr |
PrometheusStatLogger.labelname_max_lora |
1 | 0 | 0 |
attr |
PrometheusStatLogger.labelname_waiting_lora_adapters |
1 | 0 | 0 |
attr |
PrometheusStatLogger.labelname_running_lora_adapters |
1 | 0 | 0 |
attr |
PrometheusStatLogger.max_lora |
1 | 0 | 0 |
meth |
AggregateStatLoggerBase.__init__ |
3 | 2 | 0 |
meth |
AggregatedLoggingStatLogger.__init__ |
3 | 2 | 0 |
meth |
AggregatedLoggingStatLogger.record |
5 | 4 | 0 |
meth |
AggregatedLoggingStatLogger.aggregate_scheduler_stats |
1 | 0 | 0 |
meth |
AggregatedLoggingStatLogger.log |
1 | 0 | 0 |
meth |
AggregatedLoggingStatLogger.log_engine_initialized |
1 | 0 | 0 |
prop |
AggregatedLoggingStatLogger.log_prefix |
1 | 0 | 0 |
attr |
AggregatedLoggingStatLogger.engine_indexes |
1 | 0 | 0 |
attr |
AggregatedLoggingStatLogger.aggregated |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
StatLoggerManager.__init__ |
7 | 6 | 0 |
meth |
StatLoggerManager.record |
5 | 4 | 0 |
meth |
StatLoggerManager.record_sleep_state |
3 | 2 | 0 |
meth |
StatLoggerManager.log |
1 | 0 | 0 |
meth |
StatLoggerManager.log_engine_initialized |
1 | 0 | 0 |
attr |
StatLoggerManager.engine_indexes |
1 | 0 | 0 |
meth |
LoggingStatLogger.__init__ |
3 | 2 | 0 |
meth |
LoggingStatLogger._reset |
2 | 0 | 0 |
meth |
LoggingStatLogger._track_iteration_stats |
2 | 1 | 0 |
meth |
LoggingStatLogger.record |
5 | 4 | 0 |
meth |
LoggingStatLogger._update_stats |
1 | 0 | 0 |
meth |
LoggingStatLogger.aggregate_scheduler_stats |
1 | 0 | 0 |
meth |
LoggingStatLogger.log |
1 | 0 | 0 |
meth |
LoggingStatLogger.log_engine_initialized |
1 | 0 | 0 |
prop |
LoggingStatLogger.log_prefix |
1 | 0 | 0 |
attr |
LoggingStatLogger.engine_index |
1 | 0 | 0 |
attr |
LoggingStatLogger.vllm_config |
1 | 0 | 0 |
attr |
LoggingStatLogger.last_scheduler_stats |
1 | 0 | 0 |
attr |
LoggingStatLogger.prefix_caching_metrics |
1 | 0 | 0 |
attr |
LoggingStatLogger.connector_prefix_caching_metrics |
1 | 0 | 0 |
attr |
LoggingStatLogger.mm_caching_metrics |
1 | 0 | 0 |
attr |
LoggingStatLogger.spec_decoding_logging |
1 | 0 | 0 |
attr |
LoggingStatLogger.kv_connector_logging |
1 | 0 | 0 |
attr |
LoggingStatLogger.cudagraph_logging |
1 | 0 | 0 |
attr |
LoggingStatLogger.engine_is_idle |
1 | 0 | 0 |
attr |
LoggingStatLogger.aggregated |
1 | 0 | 0 |
attr |
LoggingStatLogger.perf_metrics_logging |
1 | 0 | 0 |
vllm.v1.metrics.perf (21 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
make_per_engine |
3 | 2 | 0 |
meth |
PerfMetricsProm.__init__ |
4 | 3 | 0 |
meth |
PerfMetricsProm.observe |
3 | 2 | 0 |
attr |
PerfMetricsProm.counter_flops |
1 | 0 | 0 |
attr |
PerfMetricsProm.counter_read_bytes |
1 | 0 | 0 |
attr |
PerfMetricsProm.counter_write_bytes |
1 | 0 | 0 |
meth |
PerfMetricsLogging.__init__ |
2 | 1 | 0 |
meth |
PerfMetricsLogging.reset |
1 | 0 | 0 |
meth |
PerfMetricsLogging.log |
3 | 2 | 0 |
attr |
PerfMetricsLogging.vllm_config |
1 | 0 | 0 |
attr |
PerfMetricsLogging.pp_size |
1 | 0 | 0 |
func |
getattr_from_list |
4 | 3 | 0 |
attr |
ModelMetrics.vllm_config |
1 | 0 | 0 |
meth |
ComponentMetrics.__init_subclass__ |
1 | 0 | 0 |
meth |
ParsedArgs.__getattr__ |
2 | 2 | 1 |
meth |
ParsedArgs.__setattr__ |
3 | 3 | 1 |
attr |
ParserChain.parsers |
1 | 0 | 0 |
func |
get_required |
3 | 2 | 0 |
meth |
PerfMetricsDebugLogging.__init__ |
1 | 0 | 0 |
meth |
PerfMetricsDebugLogging.reset |
1 | 0 | 0 |
meth |
PerfMetricsDebugLogging.log |
4 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.metrics.prometheus (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
unregister_vllm_metrics |
1 | 0 | 0 |
func |
shutdown_prometheus |
1 | 0 | 0 |
func |
setup_multiprocess_prometheus |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.metrics.ray_wrappers (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RayPrometheusMetric.init |
1 | 0 | 0 |
meth |
RayPrometheusMetric.labels |
3 | 0 | 0 |
meth |
RayCounterWrapper.init |
4 | 3 | 0 |
meth |
RayCounterWrapper.inc |
2 | 1 | 0 |
meth |
RayGaugeWrapper.init |
5 | 4 | 0 |
meth |
RayGaugeWrapper.set |
2 | 1 | 0 |
meth |
RayGaugeWrapper.set_to_current_time |
1 | 0 | 0 |
meth |
RayHistogramWrapper.init |
5 | 4 | 0 |
meth |
RayHistogramWrapper.observe |
2 | 1 | 0 |
meth |
RayPrometheusStatLogger._unregister_vllm_metrics |
1 | 0 | 0 |
vllm.v1.metrics.stats (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IterationStats.init |
1 | 0 | 0 |
meth |
IterationStats.update_from_output |
8 | 7 | 0 |
meth |
IterationStats.update_from_events |
7 | 6 | 0 |
meth |
IterationStats.update_from_finished_request |
6 | 5 | 0 |
attr |
IterationStats.iteration_timestamp |
1 | 0 | 0 |
attr |
IterationStats.num_generation_tokens |
1 | 0 | 0 |
attr |
IterationStats.prompt_token_stats |
1 | 0 | 0 |
attr |
IterationStats.num_preempted_reqs |
1 | 0 | 0 |
meth |
LoRARequestStates.init |
2 | 1 | 0 |
meth |
LoRARequestStates._request_update |
5 | 4 | 0 |
meth |
LoRARequestStates.request_waiting |
3 | 2 | 0 |
meth |
LoRARequestStates.request_running |
3 | 2 | 0 |
meth |
LoRARequestStates.request_finished |
3 | 2 | 0 |
meth |
LoRARequestStates.update_scheduler_stats |
2 | 1 | 0 |
attr |
LoRARequestStates.log_stats |
1 | 0 | 0 |
meth |
CachingMetrics.observe |
2 | 1 | 0 |
meth |
CachingMetrics.reset |
1 | 0 | 0 |
attr |
CachingMetrics.max_recent_requests |
1 | 0 | 0 |
attr |
CachingMetrics.aggregated_requests |
1 | 0 | 0 |
attr |
CachingMetrics.aggregated_query_total |
1 | 0 | 0 |
attr |
CachingMetrics.aggregated_query_hit |
1 | 0 | 0 |
attr |
CachingMetrics.query_queue |
1 | 0 | 0 |
meth |
LoRAStats.init |
1 | 0 | 0 |
meth |
LoRAStats.update |
4 | 3 | 0 |
vllm.v1.outputs (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
EMPTY_MODEL_RUNNER_OUTPUT |
1 | 0 | 0 |
meth |
LogprobsTensors.tolists |
2 | 1 | 0 |
meth |
LogprobsLists.slice_request |
3 | 2 | 0 |
meth |
KVConnectorOutput.is_empty |
1 | 0 | 0 |
meth |
KVConnectorOutput.merge |
2 | 1 | 0 |
vllm.v1.pool.metadata (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PoolingCursor.getitem |
2 | 1 | 0 |
meth |
PoolingCursor.is_partial_prefill |
1 | 0 | 0 |
meth |
PoolingCursor.is_finished |
1 | 0 | 0 |
meth |
PoolingMetadata.getitem |
2 | 1 | 0 |
meth |
PoolingMetadata.build_pooling_cursor |
4 | 3 | 0 |
attr |
pin_memory |
1 | 0 | 0 |
meth |
PoolingStates.init |
1 | 0 | 0 |
meth |
PoolingStates.clean |
1 | 0 | 0 |
vllm.v1.request (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Request.request_id |
1 | 0 | 0 |
attr |
Request.client_index |
1 | 0 | 0 |
attr |
Request.priority |
1 | 0 | 0 |
attr |
Request.sampling_params |
1 | 0 | 0 |
attr |
Request.pooling_params |
1 | 0 | 0 |
attr |
Request.lora_request |
1 | 0 | 0 |
attr |
Request.structured_output_request |
1 | 0 | 0 |
attr |
Request.arrival_time |
1 | 0 | 0 |
attr |
Request.status |
1 | 0 | 0 |
attr |
Request.prompt_token_ids |
1 | 0 | 0 |
attr |
Request.prompt_embeds |
1 | 0 | 0 |
attr |
Request.num_prompt_tokens |
1 | 0 | 0 |
attr |
Request.num_output_placeholders |
1 | 0 | 0 |
attr |
Request.discard_latest_async_tokens |
1 | 0 | 0 |
attr |
Request.num_computed_tokens |
1 | 0 | 0 |
attr |
Request.mm_features |
1 | 0 | 0 |
attr |
Request.output_token_ids |
1 | 0 | 0 |
attr |
Request.all_token_ids |
1 | 0 | 0 |
attr |
Request.trace_headers |
1 | 0 | 0 |
attr |
Request.num_cached_tokens |
1 | 0 | 0 |
attr |
Request.is_prefill_chunk |
1 | 0 | 0 |
attr |
Request.num_nans_in_logits |
1 | 0 | 0 |
attr |
Request.num_preemptions |
1 | 0 | 0 |
attr |
Request.num_external_computed_tokens |
1 | 0 | 0 |
attr |
Request.skip_reading_prefix_cache |
1 | 0 | 0 |
attr |
Request.resumable |
1 | 0 | 0 |
attr |
Request.max_tokens |
1 | 0 | 0 |
vllm.v1.sample.logits_processor (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AdapterLogitsProcessor.init |
4 | 3 | 0 |
meth |
AdapterLogitsProcessor.update_state |
2 | 1 | 0 |
vllm.v1.sample.logits_processor.builtin (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MinPLogitsProcessor.init |
4 | 3 | 0 |
meth |
MinPLogitsProcessor.update_state |
2 | 1 | 0 |
attr |
MinPLogitsProcessor.min_p_cpu_tensor |
1 | 0 | 0 |
attr |
MinPLogitsProcessor.min_p_cpu |
1 | 0 | 0 |
attr |
MinPLogitsProcessor.use_double_tensor |
1 | 0 | 0 |
meth |
MinTokensLogitsProcessor.init |
4 | 3 | 0 |
meth |
MinTokensLogitsProcessor.update_state |
2 | 1 | 0 |
attr |
MinTokensLogitsProcessor.device |
1 | 0 | 0 |
attr |
MinTokensLogitsProcessor.pin_memory |
1 | 0 | 0 |
attr |
MinTokensLogitsProcessor.neg_inf_tensor |
1 | 0 | 0 |
meth |
LogitBiasLogitsProcessor.init |
4 | 2 | 0 |
meth |
LogitBiasLogitsProcessor.update_state |
2 | 1 | 0 |
attr |
LogitBiasLogitsProcessor.device |
1 | 0 | 0 |
attr |
LogitBiasLogitsProcessor.pin_memory |
1 | 0 | 0 |
attr |
LogitBiasLogitsProcessor.logits_slice |
1 | 0 | 0 |
vllm.v1.sample.logits_processor.interface (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LogitsProcessor.validate_params |
2 | 1 | 0 |
vllm.v1.sample.logits_processor.state (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
BatchUpdateBuilder.batch_changed |
1 | 0 | 0 |
vllm.v1.sample.ops.topk_topp_sampler (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
TopKTopPSampler.logprobs_mode |
1 | 0 | 0 |
attr |
TopKTopPSampler.forward |
1 | 0 | 0 |
attr |
TopKTopPSampler.aiter_ops |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.sample.ops.topk_topp_triton (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
reset_buffer_cache |
1 | 0 | 0 |
vllm.v1.sample.rejection_sampler (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
rejection_random_sample_kernel |
13 | 1 | 0 |
func |
rejection_greedy_sample_kernel |
8 | 0 | 0 |
meth |
RejectionSampler.init |
2 | 1 | 0 |
attr |
RejectionSampler.sampler |
1 | 0 | 0 |
attr |
RejectionSampler.is_processed_logprobs_mode |
1 | 0 | 0 |
attr |
RejectionSampler.is_logits_logprobs_mode |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
sample_recovered_tokens_kernel |
10 | 2 | 0 |
func |
expand_kernel |
7 | 1 | 0 |
vllm.v1.sample.sampler (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sampler.init |
2 | 1 | 0 |
attr |
Sampler.topk_topp_sampler |
1 | 0 | 0 |
attr |
Sampler.pin_memory |
1 | 0 | 0 |
attr |
Sampler.logprobs_mode |
1 | 0 | 0 |
vllm.v1.serial_utils (11 missing, 23 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UtilityResult.init |
2 | 1 | 1 |
attr |
UtilityResult.result |
1 | 0 | 0 |
meth |
PydanticMsgspecMixin.get_pydantic_core_schema |
3 | 3 | 1 |
meth |
PydanticMsgspecMixin._validate_msgspec |
2 | 2 | 2 |
func |
run_method |
5 | 5 | 2 |
meth |
MsgpackEncoder.init |
2 | 1 | 0 |
meth |
MsgpackEncoder.encode |
2 | 2 | 1 |
meth |
MsgpackEncoder.encode_into |
3 | 3 | 1 |
meth |
MsgpackEncoder.enc_hook |
2 | 2 | 2 |
meth |
MsgpackEncoder._encode_nested_tensors |
2 | 2 | 1 |
meth |
MsgpackEncoder._encode_mm_field |
2 | 1 | 0 |
attr |
MsgpackEncoder.encoder |
1 | 0 | 0 |
attr |
MsgpackEncoder.size_threshold |
1 | 0 | 0 |
meth |
MsgpackDecoder.init |
3 | 2 | 0 |
meth |
MsgpackDecoder.decode |
2 | 2 | 1 |
meth |
MsgpackDecoder.dec_hook |
3 | 3 | 2 |
meth |
MsgpackDecoder._decode_utility_result |
2 | 2 | 1 |
meth |
MsgpackDecoder._convert_result |
3 | 3 | 2 |
meth |
MsgpackDecoder._decode_ndarray |
2 | 2 | 1 |
meth |
MsgpackDecoder._decode_tensor |
2 | 2 | 1 |
meth |
MsgpackDecoder._decode_nested_tensors |
2 | 2 | 1 |
meth |
MsgpackDecoder._decode_nested_slices |
2 | 2 | 2 |
meth |
MsgpackDecoder.ext_hook |
3 | 3 | 1 |
attr |
MsgpackDecoder.share_mem |
1 | 0 | 0 |
attr |
MsgpackDecoder.pin_tensors |
1 | 0 | 0 |
attr |
MsgpackDecoder.decoder |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.spec_decode.draft_model (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
DraftModelProposer.init |
4 | 2 | 0 |
meth |
DraftModelProposer._raise_if_vocab_size_mismatch |
1 | 0 | 0 |
meth |
DraftModelProposer._raise_if_draft_tp_mismatch |
1 | 0 | 0 |
vllm.v1.spec_decode.eagle (47 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SpecDecodeBaseProposer.init |
5 | 3 | 0 |
meth |
SpecDecodeBaseProposer._raise_if_padded_drafter_batch_disabled |
1 | 0 | 0 |
meth |
SpecDecodeBaseProposer._raise_if_multimodal |
1 | 0 | 0 |
meth |
SpecDecodeBaseProposer._raise_if_mrope |
1 | 0 | 0 |
meth |
SpecDecodeBaseProposer._init_parallel_drafting_params |
1 | 0 | 0 |
meth |
SpecDecodeBaseProposer._get_positions |
2 | 1 | 0 |
meth |
SpecDecodeBaseProposer._set_positions |
3 | 2 | 0 |
attr |
SpecDecodeBaseProposer.vllm_config |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.speculative_config |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.draft_model_config |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.method |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.pass_hidden_states_to_model |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.runner |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.device |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.dtype |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.max_model_len |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.dp_rank |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.num_speculative_tokens |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.hidden_size |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.inputs_embeds_size |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.extra_slots_per_request |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.net_num_new_slots_per_request |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.needs_extra_input_slots |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.max_num_tokens |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.token_arange_np |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.mm_registry |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.supports_mm_inputs |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.compilation_config |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.cudagraph_dispatcher |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.input_ids |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.uses_mrope |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.uses_xdrope_dim |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.draft_uses_xdrope_dim |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.hidden_states |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.arange |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.inputs_embeds |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.backup_next_token_ids |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.cu_drafts_per_level |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.child_drafts_per_level |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.tree_draft_pos_offsets |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.mrope_positions |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.xdrope_positions |
1 | 0 | 0 |
attr |
SpecDecodeBaseProposer.positions |
1 | 0 | 0 |
meth |
EagleProposer.init |
4 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.spec_decode.extract_hidden_states (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ExtractHiddenStatesProposer.init |
3 | 1 | 0 |
attr |
ExtractHiddenStatesProposer.vllm_config |
1 | 0 | 0 |
attr |
ExtractHiddenStatesProposer.device |
1 | 0 | 0 |
attr |
ExtractHiddenStatesProposer.dtype |
1 | 0 | 0 |
attr |
ExtractHiddenStatesProposer.dp_rank |
1 | 0 | 0 |
attr |
ExtractHiddenStatesProposer.max_num_tokens |
1 | 0 | 0 |
attr |
ExtractHiddenStatesProposer.hf_config |
1 | 0 | 0 |
attr |
ExtractHiddenStatesProposer.num_hidden_states |
1 | 0 | 0 |
attr |
ExtractHiddenStatesProposer.hidden_size |
1 | 0 | 0 |
attr |
ExtractHiddenStatesProposer.hidden_states |
1 | 0 | 0 |
attr |
ExtractHiddenStatesProposer.cudagraph_dispatcher |
1 | 0 | 0 |
vllm.v1.spec_decode.medusa (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MedusaProposer.init |
3 | 2 | 0 |
attr |
MedusaProposer.vllm_config |
1 | 0 | 0 |
attr |
MedusaProposer.spec_config |
1 | 0 | 0 |
attr |
MedusaProposer.device |
1 | 0 | 0 |
attr |
MedusaProposer.max_num_tokens |
1 | 0 | 0 |
attr |
MedusaProposer.hidden_size |
1 | 0 | 0 |
attr |
MedusaProposer.dtype |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.spec_decode.metadata (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SpecDecodeMetadata.post_init |
1 | 0 | 0 |
vllm.v1.spec_decode.metrics (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SpecDecodingProm.init |
4 | 3 | 0 |
meth |
SpecDecodingProm.observe |
3 | 2 | 0 |
attr |
SpecDecodingProm.spec_decoding_enabled |
1 | 0 | 0 |
attr |
SpecDecodingProm.counter_spec_decode_num_drafts |
1 | 0 | 0 |
attr |
SpecDecodingProm.counter_spec_decode_num_draft_tokens |
1 | 0 | 0 |
attr |
SpecDecodingProm.counter_spec_decode_num_accepted_tokens |
1 | 0 | 0 |
func |
make_per_engine |
3 | 2 | 0 |
meth |
SpecDecodingLogging.init |
1 | 0 | 0 |
meth |
SpecDecodingLogging.reset |
1 | 0 | 0 |
meth |
SpecDecodingLogging.observe |
2 | 1 | 0 |
meth |
SpecDecodingLogging.log |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
SpecDecodingStats.observe_draft |
3 | 2 | 0 |
vllm.v1.spec_decode.ngram_proposer (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NgramProposer.init |
2 | 1 | 0 |
meth |
NgramProposer.load_model |
3 | 0 | 0 |
attr |
NgramProposer.min_n |
1 | 0 | 0 |
attr |
NgramProposer.max_n |
1 | 0 | 0 |
attr |
NgramProposer.k |
1 | 0 | 0 |
attr |
NgramProposer.max_model_len |
1 | 0 | 0 |
attr |
NgramProposer.valid_ngram_draft |
1 | 0 | 0 |
attr |
NgramProposer.valid_ngram_num_drafts |
1 | 0 | 0 |
attr |
NgramProposer.num_tokens_threshold |
1 | 0 | 0 |
attr |
NgramProposer.num_numba_thread_available |
1 | 0 | 0 |
func |
batch_propose_numba |
10 | 9 | 0 |
vllm.v1.spec_decode.suffix_decoding (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SuffixDecodingProposer.init |
2 | 1 | 0 |
meth |
SuffixDecodingProposer.load_model |
3 | 0 | 0 |
attr |
SuffixDecodingProposer.num_speculative_tokens |
1 | 0 | 0 |
attr |
SuffixDecodingProposer.max_tree_depth |
1 | 0 | 0 |
attr |
SuffixDecodingProposer.max_spec_factor |
1 | 0 | 0 |
attr |
SuffixDecodingProposer.min_token_prob |
1 | 0 | 0 |
attr |
SuffixDecodingProposer.max_model_len |
1 | 0 | 0 |
attr |
SuffixDecodingProposer.suffix_cache |
1 | 0 | 0 |
vllm.v1.spec_decode.utils (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
compute_new_slot_mapping |
7 | 6 | 0 |
func |
copy_and_expand_eagle_inputs_kernel |
18 | 1 | 0 |
func |
eagle_prepare_inputs_padded_kernel |
7 | 0 | 0 |
func |
eagle_prepare_next_token_padded_kernel |
11 | 1 | 0 |
vllm.v1.structured_output (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
StructuredOutputManager.init |
2 | 1 | 0 |
attr |
StructuredOutputManager.vllm_config |
1 | 0 | 0 |
attr |
StructuredOutputManager.fill_bitmask_parallel_threshold |
1 | 0 | 0 |
attr |
StructuredOutputManager.enable_in_reasoning |
1 | 0 | 0 |
attr |
StructuredOutputManager.fill_bitmask_parallel_batch_size |
1 | 0 | 0 |
attr |
StructuredOutputManager.executor_for_fillmask |
1 | 0 | 0 |
attr |
StructuredOutputManager.executor |
1 | 0 | 0 |
attr |
StructuredOutputManager.tokenizer |
1 | 0 | 0 |
attr |
torch |
1 | 0 | 0 |
vllm.v1.structured_output.backend_guidance (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GuidanceBackend.post_init |
1 | 0 | 0 |
meth |
GuidanceBackend.allocate_token_bitmask |
2 | 1 | 0 |
meth |
GuidanceBackend.destroy |
1 | 0 | 0 |
attr |
llguidance_hf |
1 | 0 | 0 |
meth |
GuidanceGrammar.check_error |
1 | 0 | 0 |
meth |
GuidanceGrammar.reset |
1 | 0 | 0 |
attr |
llguidance |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
llguidance_torch |
1 | 0 | 0 |
vllm.v1.structured_output.backend_lm_format_enforcer (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LMFormatEnforcerBackend.post_init |
1 | 0 | 0 |
meth |
LMFormatEnforcerBackend.destroy |
1 | 0 | 0 |
attr |
lmfe_vllm |
1 | 0 | 0 |
meth |
LMFormatEnforcerGrammar.reset |
1 | 0 | 0 |
func |
validate_structured_output_request_lm_format_enforcer |
2 | 1 | 0 |
attr |
lmformatenforcer |
1 | 0 | 0 |
vllm.v1.structured_output.backend_outlines (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
oc |
1 | 0 | 0 |
meth |
OutlinesGrammar.reset |
1 | 0 | 0 |
meth |
OutlinesBackend.post_init |
1 | 0 | 0 |
meth |
OutlinesBackend.destroy |
1 | 0 | 0 |
attr |
json_schema |
1 | 0 | 0 |
func |
validate_structured_output_request_outlines |
2 | 1 | 0 |
vllm.v1.structured_output.backend_types (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
StructuredOutputBackend.destroy |
1 | 0 | 0 |
meth |
StructuredOutputGrammar.reset |
1 | 0 | 0 |
vllm.v1.structured_output.backend_xgrammar (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
xgr |
1 | 0 | 0 |
meth |
XgrammarBackend.post_init |
1 | 0 | 0 |
meth |
XgrammarBackend.allocate_token_bitmask |
2 | 1 | 0 |
meth |
XgrammarBackend.destroy |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
XgrammarGrammar.reset |
1 | 0 | 0 |
vllm.v1.structured_output.utils (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
re_replacement_seq |
1 | 0 | 0 |
attr |
oc |
1 | 0 | 0 |
attr |
convert_slow_tokenizer |
1 | 0 | 0 |
attr |
xgr |
1 | 0 | 0 |
attr |
OutlinesVocabulary.inner |
1 | 0 | 0 |
func |
get_outlines_cache |
1 | 0 | 0 |
attr |
re_llama_byte_token |
1 | 0 | 0 |
attr |
file_utils |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.utils (28 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
report_usage_stats |
3 | 2 | 0 |
meth |
ConstantList.append |
2 | 0 | 0 |
meth |
ConstantList.extend |
2 | 0 | 0 |
meth |
ConstantList.insert |
2 | 0 | 0 |
meth |
ConstantList.pop |
2 | 0 | 0 |
meth |
ConstantList.remove |
2 | 0 | 0 |
meth |
ConstantList.clear |
1 | 0 | 0 |
meth |
ConstantList.setitem |
5 | 4 | 0 |
meth |
ConstantList.delitem |
2 | 0 | 0 |
meth |
ConstantList.iter |
1 | 0 | 0 |
meth |
ConstantList.contains |
2 | 0 | 0 |
meth |
ConstantList.len |
1 | 0 | 0 |
meth |
ConstantList.repr |
1 | 0 | 0 |
func |
shutdown |
2 | 1 | 0 |
attr |
CpuGpuBuffer.cpu |
1 | 0 | 0 |
attr |
CpuGpuBuffer.gpu |
1 | 0 | 0 |
meth |
APIServerProcessManager.init |
9 | 8 | 1 |
attr |
APIServerProcessManager.listen_address |
1 | 0 | 0 |
attr |
APIServerProcessManager.sock |
1 | 0 | 0 |
attr |
APIServerProcessManager.args |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.worker.block_table (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlockTable.init |
9 | 8 | 0 |
attr |
BlockTable.max_num_reqs |
1 | 0 | 0 |
attr |
BlockTable.max_num_batched_tokens |
1 | 0 | 0 |
attr |
BlockTable.pin_memory |
1 | 0 | 0 |
attr |
BlockTable.device |
1 | 0 | 0 |
attr |
BlockTable.max_num_blocks_per_req |
1 | 0 | 0 |
attr |
BlockTable.block_table |
1 | 0 | 0 |
attr |
BlockTable.num_blocks_per_row |
1 | 0 | 0 |
attr |
BlockTable.slot_mapping |
1 | 0 | 0 |
attr |
BlockTable.cp_kv_cache_interleave_size |
1 | 0 | 0 |
attr |
BlockTable.block_size |
1 | 0 | 0 |
attr |
BlockTable.blocks_per_kv_block |
1 | 0 | 0 |
attr |
BlockTable.use_hybrid_blocks |
1 | 0 | 0 |
attr |
BlockTable.pcp_world_size |
1 | 0 | 0 |
attr |
BlockTable.pcp_rank |
1 | 0 | 0 |
attr |
BlockTable.dcp_world_size |
1 | 0 | 0 |
attr |
BlockTable.dcp_rank |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
MultiGroupBlockTable.block_tables |
1 | 0 | 0 |
vllm.v1.worker.cp_utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_total_cp_world_size |
1 | 0 | 0 |
vllm.v1.worker.cpu_model_runner (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CPUModelRunner.init |
3 | 2 | 0 |
attr |
CPUModelRunner.use_cuda_graph |
1 | 0 | 0 |
attr |
CPUModelRunner.cascade_attn_enabled |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.worker.cpu_worker (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
CPUWorker.init |
6 | 5 | 0 |
meth |
CPUWorker.init_device |
1 | 0 | 0 |
meth |
CPUWorker.profile |
3 | 2 | 0 |
vllm.v1.worker.dp_utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.v1.worker.ec_connector_model_runner_mixin (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ECConnectorModelRunnerMixin.maybe_save_ec_to_connector |
3 | 2 | 0 |
meth |
ECConnectorModelRunnerMixin.maybe_get_ec_connector_output |
4 | 3 | 0 |
meth |
ECConnectorModelRunnerMixin._get_ec_connector_output |
4 | 3 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.worker.gpu.async_utils (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AsyncOutput.init |
7 | 6 | 0 |
attr |
AsyncOutput.model_runner_output |
1 | 0 | 0 |
attr |
AsyncOutput.sampler_output |
1 | 0 | 0 |
attr |
AsyncOutput.num_sampled_tokens |
1 | 0 | 0 |
attr |
AsyncOutput.copy_event |
1 | 0 | 0 |
attr |
AsyncOutput.sampled_token_ids |
1 | 0 | 0 |
attr |
AsyncOutput.num_sampled_tokens_np |
1 | 0 | 0 |
attr |
AsyncOutput.prompt_logprobs_dict |
1 | 0 | 0 |
meth |
AsyncPoolingOutput.init |
7 | 6 | 0 |
attr |
AsyncPoolingOutput.model_runner_output |
1 | 0 | 0 |
attr |
AsyncPoolingOutput.pooler_output |
1 | 0 | 0 |
attr |
AsyncPoolingOutput.is_valid |
1 | 0 | 0 |
attr |
AsyncPoolingOutput.copy_event |
1 | 0 | 0 |
attr |
AsyncPoolingOutput.pooler_output_cpu |
1 | 0 | 0 |
attr |
AsyncPoolingOutput.is_valid_cpu |
1 | 0 | 0 |
func |
stream |
3 | 2 | 0 |
vllm.v1.worker.gpu.attn_utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
init_attn_backend |
4 | 3 | 0 |
vllm.v1.worker.gpu.block_table (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlockTables.init |
9 | 8 | 0 |
attr |
BlockTables.block_sizes |
1 | 0 | 0 |
attr |
BlockTables.max_num_reqs |
1 | 0 | 0 |
attr |
BlockTables.max_num_batched_tokens |
1 | 0 | 0 |
attr |
BlockTables.max_model_len |
1 | 0 | 0 |
attr |
BlockTables.device |
1 | 0 | 0 |
attr |
BlockTables.cp_size |
1 | 0 | 0 |
attr |
BlockTables.cp_rank |
1 | 0 | 0 |
attr |
BlockTables.cp_interleave |
1 | 0 | 0 |
attr |
BlockTables.num_kv_cache_groups |
1 | 0 | 0 |
attr |
BlockTables.block_table_ptrs |
1 | 0 | 0 |
attr |
BlockTables.block_table_strides |
1 | 0 | 0 |
attr |
BlockTables.block_sizes_tensor |
1 | 0 | 0 |
attr |
BlockTables.num_blocks |
1 | 0 | 0 |
attr |
BlockTables.input_block_table_ptrs |
1 | 0 | 0 |
attr |
BlockTables.slot_mappings |
1 | 0 | 0 |
vllm.v1.worker.gpu.buffer_utils (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UvaBackedTensor.init |
4 | 3 | 0 |
attr |
UvaBackedTensor.dtype |
1 | 0 | 0 |
attr |
UvaBackedTensor.max_concurrency |
1 | 0 | 0 |
attr |
UvaBackedTensor.cpu |
1 | 0 | 0 |
attr |
UvaBackedTensor.np |
1 | 0 | 0 |
attr |
UvaBackedTensor.pool |
1 | 0 | 0 |
attr |
UvaBackedTensor.gpu |
1 | 0 | 0 |
meth |
UvaBuffer.init |
3 | 2 | 0 |
attr |
UvaBuffer.cpu |
1 | 0 | 0 |
attr |
UvaBuffer.np |
1 | 0 | 0 |
attr |
UvaBuffer.uva |
1 | 0 | 0 |
meth |
StagedWriteTensor.init |
6 | 5 | 0 |
attr |
StagedWriteTensor.num_rows |
1 | 0 | 0 |
attr |
StagedWriteTensor.dtype |
1 | 0 | 0 |
attr |
StagedWriteTensor.device |
1 | 0 | 0 |
attr |
StagedWriteTensor.max_concurrency |
1 | 0 | 0 |
attr |
StagedWriteTensor.write_indices |
1 | 0 | 0 |
attr |
StagedWriteTensor.write_starts |
1 | 0 | 0 |
attr |
StagedWriteTensor.write_cu_lens |
1 | 0 | 0 |
attr |
StagedWriteTensor.gpu |
1 | 0 | 0 |
meth |
UvaBufferPool.init |
4 | 3 | 0 |
attr |
UvaBufferPool.size |
1 | 0 | 0 |
attr |
UvaBufferPool.dtype |
1 | 0 | 0 |
attr |
UvaBufferPool.max_concurrency |
1 | 0 | 0 |
vllm.v1.worker.gpu.cudagraph_utils (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
capture_graphs |
7 | 6 | 0 |
meth |
CudaGraphManager.init |
4 | 3 | 0 |
attr |
CudaGraphManager.vllm_config |
1 | 0 | 0 |
attr |
CudaGraphManager.scheduler_config |
1 | 0 | 0 |
attr |
CudaGraphManager.use_aux_hidden_state_outputs |
1 | 0 | 0 |
attr |
CudaGraphManager.device |
1 | 0 | 0 |
attr |
CudaGraphManager.max_model_len |
1 | 0 | 0 |
attr |
CudaGraphManager.max_num_reqs |
1 | 0 | 0 |
attr |
CudaGraphManager.max_num_tokens |
1 | 0 | 0 |
attr |
CudaGraphManager.dp_size |
1 | 0 | 0 |
attr |
CudaGraphManager.uniform_decode_query_len |
1 | 0 | 0 |
attr |
CudaGraphManager.compilation_config |
1 | 0 | 0 |
attr |
CudaGraphManager.cudagraph_mode |
1 | 0 | 0 |
attr |
CudaGraphManager.pool |
1 | 0 | 0 |
vllm.v1.worker.gpu.input_batch (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InputBuffers.init |
4 | 3 | 0 |
attr |
InputBuffers.max_num_reqs |
1 | 0 | 0 |
attr |
InputBuffers.max_num_tokens |
1 | 0 | 0 |
attr |
InputBuffers.device |
1 | 0 | 0 |
attr |
InputBuffers.input_ids |
1 | 0 | 0 |
attr |
InputBuffers.positions |
1 | 0 | 0 |
attr |
InputBuffers.query_start_loc |
1 | 0 | 0 |
attr |
InputBuffers.seq_lens |
1 | 0 | 0 |
attr |
InputBuffers.dcp_local_seq_lens |
1 | 0 | 0 |
vllm.v1.worker.gpu.kv_connector (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ActiveKVConnector.init |
3 | 2 | 0 |
attr |
ActiveKVConnector.vllm_config |
1 | 0 | 0 |
attr |
ActiveKVConnector.kv_connector |
1 | 0 | 0 |
attr |
NO_OP_KV_CONNECTOR |
1 | 0 | 0 |
vllm.v1.worker.gpu.lora_utils (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LoraState.init |
2 | 1 | 0 |
attr |
LoraState.lora_ids |
1 | 0 | 0 |
vllm.v1.worker.gpu.mm.encoder_cache (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EncoderCache.init |
1 | 0 | 0 |
vllm.v1.worker.gpu.mm.encoder_runner (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EncoderRunner.init |
7 | 6 | 0 |
attr |
EncoderRunner.model |
1 | 0 | 0 |
attr |
EncoderRunner.max_num_tokens |
1 | 0 | 0 |
attr |
EncoderRunner.hidden_size |
1 | 0 | 0 |
attr |
EncoderRunner.encoder_cache |
1 | 0 | 0 |
attr |
EncoderRunner.dtype |
1 | 0 | 0 |
attr |
EncoderRunner.device |
1 | 0 | 0 |
attr |
EncoderRunner.inputs_embeds |
1 | 0 | 0 |
vllm.v1.worker.gpu.mm.mrope_utils (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MRopeState.init |
5 | 4 | 0 |
attr |
MRopeState.max_num_reqs |
1 | 0 | 0 |
attr |
MRopeState.max_num_tokens |
1 | 0 | 0 |
attr |
MRopeState.max_model_len |
1 | 0 | 0 |
attr |
MRopeState.device |
1 | 0 | 0 |
attr |
MRopeState.prefill_mrope_positions |
1 | 0 | 0 |
attr |
MRopeState.prefill_mrope_delta |
1 | 0 | 0 |
attr |
MRopeState.mrope_positions |
1 | 0 | 0 |
vllm.v1.worker.gpu.model_runner (50 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
GPUModelRunner.init |
3 | 2 | 0 |
meth |
GPUModelRunner.load_model |
3 | 1 | 0 |
meth |
GPUModelRunner.get_kv_cache_spec |
1 | 0 | 0 |
meth |
GPUModelRunner._dummy_run |
6 | 4 | 0 |
attr |
GPUModelRunner.vllm_config |
1 | 0 | 0 |
attr |
GPUModelRunner.model_config |
1 | 0 | 0 |
attr |
GPUModelRunner.cache_config |
1 | 0 | 0 |
attr |
GPUModelRunner.compilation_config |
1 | 0 | 0 |
attr |
GPUModelRunner.lora_config |
1 | 0 | 0 |
attr |
GPUModelRunner.load_config |
1 | 0 | 0 |
attr |
GPUModelRunner.parallel_config |
1 | 0 | 0 |
attr |
GPUModelRunner.scheduler_config |
1 | 0 | 0 |
attr |
GPUModelRunner.speculative_config |
1 | 0 | 0 |
attr |
GPUModelRunner.observability_config |
1 | 0 | 0 |
attr |
GPUModelRunner.device |
1 | 0 | 0 |
attr |
GPUModelRunner.dtype |
1 | 0 | 0 |
attr |
GPUModelRunner.kv_cache_dtype |
1 | 0 | 0 |
attr |
GPUModelRunner.vocab_size |
1 | 0 | 0 |
attr |
GPUModelRunner.max_model_len |
1 | 0 | 0 |
attr |
GPUModelRunner.max_num_tokens |
1 | 0 | 0 |
attr |
GPUModelRunner.max_num_reqs |
1 | 0 | 0 |
attr |
GPUModelRunner.use_async_scheduling |
1 | 0 | 0 |
attr |
GPUModelRunner.output_copy_stream |
1 | 0 | 0 |
attr |
GPUModelRunner.output_copy_event |
1 | 0 | 0 |
attr |
GPUModelRunner.pp_size |
1 | 0 | 0 |
attr |
GPUModelRunner.use_pp |
1 | 0 | 0 |
attr |
GPUModelRunner.dcp_size |
1 | 0 | 0 |
attr |
GPUModelRunner.use_dcp |
1 | 0 | 0 |
attr |
GPUModelRunner.dcp_rank |
1 | 0 | 0 |
attr |
GPUModelRunner.cp_interleave |
1 | 0 | 0 |
attr |
GPUModelRunner.mm_registry |
1 | 0 | 0 |
attr |
GPUModelRunner.supports_mm_inputs |
1 | 0 | 0 |
attr |
GPUModelRunner.encoder_cache |
1 | 0 | 0 |
attr |
GPUModelRunner.speculator |
1 | 0 | 0 |
attr |
GPUModelRunner.num_speculative_steps |
1 | 0 | 0 |
attr |
GPUModelRunner.use_aux_hidden_state_outputs |
1 | 0 | 0 |
attr |
GPUModelRunner.draft_tokens_handler |
1 | 0 | 0 |
attr |
GPUModelRunner.req_states |
1 | 0 | 0 |
attr |
GPUModelRunner.input_buffers |
1 | 0 | 0 |
attr |
GPUModelRunner.sampler |
1 | 0 | 0 |
attr |
GPUModelRunner.prompt_logprobs_worker |
1 | 0 | 0 |
attr |
GPUModelRunner.cudagraph_manager |
1 | 0 | 0 |
attr |
GPUModelRunner.structured_outputs_worker |
1 | 0 | 0 |
attr |
GPUModelRunner.lora_state |
1 | 0 | 0 |
attr |
GPUModelRunner.is_pooling_model |
1 | 0 | 0 |
attr |
GPUModelRunner.is_first_pp_rank |
1 | 0 | 0 |
attr |
GPUModelRunner.is_last_pp_rank |
1 | 0 | 0 |
vllm.v1.worker.gpu.model_states (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
init_model_state |
5 | 4 | 0 |
vllm.v1.worker.gpu.model_states.default (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DefaultModelState.__init__ |
5 | 4 | 0 |
attr |
DefaultModelState.vllm_config |
1 | 0 | 0 |
attr |
DefaultModelState.model_config |
1 | 0 | 0 |
attr |
DefaultModelState.scheduler_config |
1 | 0 | 0 |
attr |
DefaultModelState.model |
1 | 0 | 0 |
attr |
DefaultModelState.device |
1 | 0 | 0 |
attr |
DefaultModelState.supports_mm_inputs |
1 | 0 | 0 |
attr |
DefaultModelState.max_model_len |
1 | 0 | 0 |
attr |
DefaultModelState.max_num_reqs |
1 | 0 | 0 |
attr |
DefaultModelState.max_num_tokens |
1 | 0 | 0 |
attr |
DefaultModelState.inputs_embeds_size |
1 | 0 | 0 |
attr |
DefaultModelState.dtype |
1 | 0 | 0 |
attr |
DefaultModelState.uses_mrope |
1 | 0 | 0 |
attr |
DefaultModelState.encoder_cache |
1 | 0 | 0 |
attr |
DefaultModelState.encoder_runner |
1 | 0 | 0 |
attr |
DefaultModelState.mrope_state |
1 | 0 | 0 |
vllm.v1.worker.gpu.pool.pooling_runner (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PoolingRunner.__init__ |
2 | 1 | 0 |
attr |
PoolingRunner.model |
1 | 0 | 0 |
vllm.v1.worker.gpu.sample.bad_words (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BadWordsState.__init__ |
2 | 1 | 0 |
attr |
BadWordsState.req_states |
1 | 0 | 0 |
attr |
BadWordsState.max_num_reqs |
1 | 0 | 0 |
attr |
BadWordsState.device |
1 | 0 | 0 |
attr |
BadWordsState.bad_word_token_ids |
1 | 0 | 0 |
attr |
BadWordsState.bad_word_offsets |
1 | 0 | 0 |
attr |
BadWordsState.num_bad_words |
1 | 0 | 0 |
vllm.v1.worker.gpu.sample.logit_bias (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LogitBiasState.__init__ |
3 | 2 | 0 |
attr |
LogitBiasState.max_num_reqs |
1 | 0 | 0 |
attr |
LogitBiasState.num_allowed_token_ids |
1 | 0 | 0 |
attr |
LogitBiasState.allowed_token_ids |
1 | 0 | 0 |
attr |
LogitBiasState.num_logit_bias |
1 | 0 | 0 |
attr |
LogitBiasState.logit_bias_token_ids |
1 | 0 | 0 |
attr |
LogitBiasState.logit_bias |
1 | 0 | 0 |
attr |
LogitBiasState.min_lens |
1 | 0 | 0 |
attr |
LogitBiasState.num_stop_token_ids |
1 | 0 | 0 |
attr |
LogitBiasState.stop_token_ids |
1 | 0 | 0 |
attr |
LogitBiasState.use_logit_bias |
1 | 0 | 0 |
vllm.v1.worker.gpu.sample.penalties (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PenaltiesState.__init__ |
2 | 1 | 0 |
attr |
PenaltiesState.req_states |
1 | 0 | 0 |
attr |
PenaltiesState.vocab_size |
1 | 0 | 0 |
attr |
PenaltiesState.device |
1 | 0 | 0 |
attr |
PenaltiesState.repetition_penalty |
1 | 0 | 0 |
attr |
PenaltiesState.frequency_penalty |
1 | 0 | 0 |
attr |
PenaltiesState.presence_penalty |
1 | 0 | 0 |
attr |
PenaltiesState.use_penalty |
1 | 0 | 0 |
attr |
PenaltiesState.prompt_bin_mask |
1 | 0 | 0 |
attr |
PenaltiesState.output_bin_counts |
1 | 0 | 0 |
vllm.v1.worker.gpu.sample.prompt_logprob (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PromptLogprobsWorker.__init__ |
2 | 1 | 0 |
meth |
PromptLogprobsWorker.add_request |
4 | 3 | 0 |
attr |
PromptLogprobsWorker.max_num_reqs |
1 | 0 | 0 |
attr |
PromptLogprobsWorker.uses_prompt_logprobs |
1 | 0 | 0 |
vllm.v1.worker.gpu.sample.sampler (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sampler.__init__ |
7 | 6 | 0 |
attr |
Sampler.logprobs_mode |
1 | 0 | 0 |
attr |
Sampler.compute_nans |
1 | 0 | 0 |
attr |
Sampler.sampling_states |
1 | 0 | 0 |
attr |
Sampler.penalties_state |
1 | 0 | 0 |
attr |
Sampler.logit_bias_state |
1 | 0 | 0 |
attr |
Sampler.bad_words_state |
1 | 0 | 0 |
attr |
Sampler.num_speculative_tokens |
1 | 0 | 0 |
vllm.v1.worker.gpu.sample.states (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SamplingStates.__init__ |
3 | 2 | 0 |
attr |
SamplingStates.max_num_reqs |
1 | 0 | 0 |
attr |
SamplingStates.vocab_size |
1 | 0 | 0 |
attr |
SamplingStates.temperature |
1 | 0 | 0 |
attr |
SamplingStates.top_k |
1 | 0 | 0 |
attr |
SamplingStates.top_p |
1 | 0 | 0 |
attr |
SamplingStates.min_p |
1 | 0 | 0 |
attr |
SamplingStates.seeds |
1 | 0 | 0 |
attr |
SamplingStates.num_logprobs |
1 | 0 | 0 |
vllm.v1.worker.gpu.spec_decode (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
init_speculator |
3 | 2 | 0 |
vllm.v1.worker.gpu.spec_decode.eagle.cudagraph (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EagleCudaGraphManager.__init__ |
3 | 2 | 0 |
attr |
EagleCudaGraphManager.vllm_config |
1 | 0 | 0 |
attr |
EagleCudaGraphManager.scheduler_config |
1 | 0 | 0 |
attr |
EagleCudaGraphManager.device |
1 | 0 | 0 |
attr |
EagleCudaGraphManager.max_model_len |
1 | 0 | 0 |
attr |
EagleCudaGraphManager.max_num_reqs |
1 | 0 | 0 |
attr |
EagleCudaGraphManager.max_num_tokens |
1 | 0 | 0 |
attr |
EagleCudaGraphManager.dp_size |
1 | 0 | 0 |
attr |
EagleCudaGraphManager.compilation_config |
1 | 0 | 0 |
attr |
EagleCudaGraphManager.cudagraph_mode |
1 | 0 | 0 |
attr |
EagleCudaGraphManager.pool |
1 | 0 | 0 |
vllm.v1.worker.gpu.spec_decode.eagle.eagle3_utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.v1.worker.gpu.spec_decode.eagle.speculator (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
prepare_eagle_decode |
10 | 9 | 0 |
func |
update_eagle_inputs |
6 | 5 | 0 |
meth |
EagleSpeculator.__init__ |
3 | 2 | 0 |
attr |
EagleSpeculator.vllm_config |
1 | 0 | 0 |
attr |
EagleSpeculator.device |
1 | 0 | 0 |
attr |
EagleSpeculator.speculative_config |
1 | 0 | 0 |
attr |
EagleSpeculator.method |
1 | 0 | 0 |
attr |
EagleSpeculator.num_speculative_steps |
1 | 0 | 0 |
attr |
EagleSpeculator.draft_model_config |
1 | 0 | 0 |
attr |
EagleSpeculator.scheduler_config |
1 | 0 | 0 |
attr |
EagleSpeculator.max_num_reqs |
1 | 0 | 0 |
attr |
EagleSpeculator.max_num_tokens |
1 | 0 | 0 |
attr |
EagleSpeculator.max_model_len |
1 | 0 | 0 |
attr |
EagleSpeculator.hidden_size |
1 | 0 | 0 |
attr |
EagleSpeculator.vocab_size |
1 | 0 | 0 |
attr |
EagleSpeculator.dtype |
1 | 0 | 0 |
attr |
EagleSpeculator.dp_size |
1 | 0 | 0 |
attr |
EagleSpeculator.dp_rank |
1 | 0 | 0 |
attr |
EagleSpeculator.input_buffers |
1 | 0 | 0 |
attr |
EagleSpeculator.hidden_states |
1 | 0 | 0 |
attr |
EagleSpeculator.idx_mapping |
1 | 0 | 0 |
attr |
EagleSpeculator.temperature |
1 | 0 | 0 |
attr |
EagleSpeculator.seeds |
1 | 0 | 0 |
attr |
EagleSpeculator.draft_tokens |
1 | 0 | 0 |
attr |
EagleSpeculator.cudagraph_manager |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.worker.gpu.spec_decode.utils (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DraftTokensHandler.__init__ |
2 | 1 | 0 |
attr |
DraftTokensHandler.device |
1 | 0 | 0 |
attr |
DraftTokensHandler.copy_stream |
1 | 0 | 0 |
attr |
DraftTokensHandler.copy_event |
1 | 0 | 0 |
vllm.v1.worker.gpu.states (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RequestState.__init__ |
7 | 6 | 0 |
attr |
RequestState.max_num_reqs |
1 | 0 | 0 |
attr |
RequestState.max_model_len |
1 | 0 | 0 |
attr |
RequestState.max_num_batched_tokens |
1 | 0 | 0 |
attr |
RequestState.num_speculative_steps |
1 | 0 | 0 |
attr |
RequestState.vocab_size |
1 | 0 | 0 |
attr |
RequestState.device |
1 | 0 | 0 |
attr |
RequestState.free_indices |
1 | 0 | 0 |
attr |
RequestState.all_token_ids |
1 | 0 | 0 |
attr |
RequestState.prompt_len |
1 | 0 | 0 |
attr |
RequestState.prefill_len |
1 | 0 | 0 |
attr |
RequestState.total_len |
1 | 0 | 0 |
attr |
RequestState.num_computed_prefill_tokens |
1 | 0 | 0 |
attr |
RequestState.num_computed_tokens |
1 | 0 | 0 |
attr |
RequestState.last_sampled_tokens |
1 | 0 | 0 |
attr |
RequestState.draft_tokens |
1 | 0 | 0 |
attr |
RequestState.next_prefill_tokens |
1 | 0 | 0 |
vllm.v1.worker.gpu.structured_outputs (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
StructuredOutputsWorker.__init__ |
4 | 3 | 0 |
attr |
StructuredOutputsWorker.logits_indices |
1 | 0 | 0 |
attr |
StructuredOutputsWorker.grammar_bitmask |
1 | 0 | 0 |
attr |
StructuredOutputsWorker.device |
1 | 0 | 0 |
attr |
StructuredOutputsWorker.copy_stream |
1 | 0 | 0 |
vllm.v1.worker.gpu_input_batch (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CachedRequestState.__post_init__ |
1 | 0 | 0 |
meth |
InputBatch.__init__ |
15 | 14 | 0 |
meth |
InputBatch.refresh_metadata |
1 | 0 | 0 |
attr |
InputBatch.is_pooling_model |
1 | 0 | 0 |
attr |
InputBatch.is_spec_decode |
1 | 0 | 0 |
attr |
InputBatch.max_num_reqs |
1 | 0 | 0 |
attr |
InputBatch.max_model_len |
1 | 0 | 0 |
attr |
InputBatch.max_num_batched_tokens |
1 | 0 | 0 |
attr |
InputBatch.device |
1 | 0 | 0 |
attr |
InputBatch.pin_memory |
1 | 0 | 0 |
attr |
InputBatch.vocab_size |
1 | 0 | 0 |
attr |
InputBatch.token_ids_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.token_ids_cpu |
1 | 0 | 0 |
attr |
InputBatch.is_token_ids_tensor |
1 | 0 | 0 |
attr |
InputBatch.is_token_ids |
1 | 0 | 0 |
attr |
InputBatch.num_tokens_no_spec |
1 | 0 | 0 |
attr |
InputBatch.num_prompt_tokens |
1 | 0 | 0 |
attr |
InputBatch.num_computed_tokens_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.num_computed_tokens_cpu |
1 | 0 | 0 |
attr |
InputBatch.block_table |
1 | 0 | 0 |
attr |
InputBatch.temperature |
1 | 0 | 0 |
attr |
InputBatch.temperature_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.temperature_cpu |
1 | 0 | 0 |
attr |
InputBatch.top_p |
1 | 0 | 0 |
attr |
InputBatch.top_p_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.top_p_cpu |
1 | 0 | 0 |
attr |
InputBatch.top_k |
1 | 0 | 0 |
attr |
InputBatch.top_k_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.top_k_cpu |
1 | 0 | 0 |
attr |
InputBatch.frequency_penalties |
1 | 0 | 0 |
attr |
InputBatch.frequency_penalties_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.frequency_penalties_cpu |
1 | 0 | 0 |
attr |
InputBatch.presence_penalties |
1 | 0 | 0 |
attr |
InputBatch.presence_penalties_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.presence_penalties_cpu |
1 | 0 | 0 |
attr |
InputBatch.repetition_penalties |
1 | 0 | 0 |
attr |
InputBatch.repetition_penalties_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.repetition_penalties_cpu |
1 | 0 | 0 |
attr |
InputBatch.num_accepted_tokens_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.num_accepted_tokens_cpu |
1 | 0 | 0 |
attr |
InputBatch.request_lora_mapping |
1 | 0 | 0 |
attr |
InputBatch.batch_update_builder |
1 | 0 | 0 |
attr |
InputBatch.logits_processing_needs_token_ids |
1 | 0 | 0 |
attr |
InputBatch.logitsprocs |
1 | 0 | 0 |
attr |
InputBatch.logitsprocs_need_output_token_ids |
1 | 0 | 0 |
attr |
InputBatch.sampling_metadata |
1 | 0 | 0 |
vllm.v1.worker.gpu_model_runner (88 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AsyncGPUPoolingModelRunnerOutput.__init__ |
5 | 4 | 0 |
attr |
AsyncGPUPoolingModelRunnerOutput.async_copy_ready_event |
1 | 0 | 0 |
meth |
AsyncGPUModelRunnerOutput.__init__ |
7 | 6 | 0 |
attr |
AsyncGPUModelRunnerOutput.async_copy_ready_event |
1 | 0 | 0 |
attr |
AsyncGPUModelRunnerOutput.vocab_size |
1 | 0 | 0 |
attr |
AsyncGPUModelRunnerOutput.sampled_token_ids_cpu |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
GPUModelRunner.__init__ |
3 | 2 | 0 |
meth |
GPUModelRunner._get_positions |
2 | 1 | 1 |
meth |
GPUModelRunner._init_model_kwargs |
1 | 0 | 0 |
meth |
GPUModelRunner._init_mrope_positions |
2 | 1 | 0 |
meth |
GPUModelRunner._init_xdrope_positions |
2 | 1 | 0 |
meth |
GPUModelRunner._calc_mrope_positions |
2 | 1 | 0 |
meth |
GPUModelRunner._calc_xdrope_positions |
2 | 1 | 0 |
meth |
GPUModelRunner.synchronize_input_prep |
1 | 0 | 0 |
meth |
GPUModelRunner._model_forward |
6 | 6 | 1 |
meth |
GPUModelRunner.maybe_randomize_inputs |
3 | 2 | 0 |
meth |
GPUModelRunner._capture_cudagraphs |
3 | 2 | 0 |
meth |
GPUModelRunner.init_routed_experts_capturer |
1 | 0 | 0 |
meth |
GPUModelRunner.timed_encoder_operation |
5 | 4 | 0 |
attr |
GPUModelRunner.vllm_config |
1 | 0 | 0 |
attr |
GPUModelRunner.model_config |
1 | 0 | 0 |
attr |
GPUModelRunner.cache_config |
1 | 0 | 0 |
attr |
GPUModelRunner.offload_config |
1 | 0 | 0 |
attr |
GPUModelRunner.compilation_config |
1 | 0 | 0 |
attr |
GPUModelRunner.lora_config |
1 | 0 | 0 |
attr |
GPUModelRunner.load_config |
1 | 0 | 0 |
attr |
GPUModelRunner.parallel_config |
1 | 0 | 0 |
attr |
GPUModelRunner.scheduler_config |
1 | 0 | 0 |
attr |
GPUModelRunner.speculative_config |
1 | 0 | 0 |
attr |
GPUModelRunner.observability_config |
1 | 0 | 0 |
attr |
GPUModelRunner.device |
1 | 0 | 0 |
attr |
GPUModelRunner.pin_memory |
1 | 0 | 0 |
attr |
GPUModelRunner.dtype |
1 | 0 | 0 |
attr |
GPUModelRunner.kv_cache_dtype |
1 | 0 | 0 |
attr |
GPUModelRunner.is_pooling_model |
1 | 0 | 0 |
attr |
GPUModelRunner.enable_prompt_embeds |
1 | 0 | 0 |
attr |
GPUModelRunner.is_multimodal_raw_input_only_model |
1 | 0 | 0 |
attr |
GPUModelRunner.is_multimodal_pruning_enabled |
1 | 0 | 0 |
attr |
GPUModelRunner.max_model_len |
1 | 0 | 0 |
attr |
GPUModelRunner.calculate_kv_scales |
1 | 0 | 0 |
attr |
GPUModelRunner.dcp_world_size |
1 | 0 | 0 |
attr |
GPUModelRunner.dcp_rank |
1 | 0 | 0 |
attr |
GPUModelRunner.max_num_tokens |
1 | 0 | 0 |
attr |
GPUModelRunner.max_num_reqs |
1 | 0 | 0 |
attr |
GPUModelRunner.broadcast_pp_output |
1 | 0 | 0 |
attr |
GPUModelRunner.num_query_heads |
1 | 0 | 0 |
attr |
GPUModelRunner.inputs_embeds_size |
1 | 0 | 0 |
attr |
GPUModelRunner.attention_chunk_size |
1 | 0 | 0 |
attr |
GPUModelRunner.use_alibi |
1 | 0 | 0 |
attr |
GPUModelRunner.cascade_attn_enabled |
1 | 0 | 0 |
attr |
GPUModelRunner.is_mm_prefix_lm |
1 | 0 | 0 |
attr |
GPUModelRunner.mm_registry |
1 | 0 | 0 |
attr |
GPUModelRunner.uses_mrope |
1 | 0 | 0 |
attr |
GPUModelRunner.uses_xdrope_dim |
1 | 0 | 0 |
attr |
GPUModelRunner.supports_mm_inputs |
1 | 0 | 0 |
attr |
GPUModelRunner.use_async_scheduling |
1 | 0 | 0 |
attr |
GPUModelRunner.sampler |
1 | 0 | 0 |
attr |
GPUModelRunner.eep_eplb_suppressed |
1 | 0 | 0 |
attr |
GPUModelRunner.use_aux_hidden_state_outputs |
1 | 0 | 0 |
attr |
GPUModelRunner.num_spec_tokens |
1 | 0 | 0 |
attr |
GPUModelRunner.comm_stream |
1 | 0 | 0 |
attr |
GPUModelRunner.input_batch |
1 | 0 | 0 |
attr |
GPUModelRunner.input_ids |
1 | 0 | 0 |
attr |
GPUModelRunner.positions |
1 | 0 | 0 |
attr |
GPUModelRunner.query_start_loc |
1 | 0 | 0 |
attr |
GPUModelRunner.seq_lens |
1 | 0 | 0 |
attr |
GPUModelRunner.encoder_seq_lens |
1 | 0 | 0 |
attr |
GPUModelRunner.inputs_embeds |
1 | 0 | 0 |
attr |
GPUModelRunner.is_token_ids |
1 | 0 | 0 |
attr |
GPUModelRunner.discard_request_mask |
1 | 0 | 0 |
attr |
GPUModelRunner.num_decode_draft_tokens |
1 | 0 | 0 |
attr |
GPUModelRunner.num_accepted_tokens |
1 | 0 | 0 |
attr |
GPUModelRunner.arange_np |
1 | 0 | 0 |
attr |
GPUModelRunner.kv_sharing_fast_prefill_logits_indices |
1 | 0 | 0 |
attr |
GPUModelRunner.uniform_decode_query_len |
1 | 0 | 0 |
attr |
GPUModelRunner.cudagraph_dispatcher |
1 | 0 | 0 |
attr |
GPUModelRunner.mm_budget |
1 | 0 | 0 |
attr |
GPUModelRunner.transfer_event |
1 | 0 | 0 |
attr |
GPUModelRunner.sampled_token_ids_pinned_cpu |
1 | 0 | 0 |
attr |
GPUModelRunner.layerwise_nvtx_hooks_registered |
1 | 0 | 0 |
attr |
GPUModelRunner.max_encoder_len |
1 | 0 | 0 |
attr |
GPUModelRunner.rejection_sampler |
1 | 0 | 0 |
attr |
GPUModelRunner.dcp_local_seq_lens |
1 | 0 | 0 |
attr |
GPUModelRunner.is_mm_embed_buffers |
1 | 0 | 0 |
attr |
GPUModelRunner.is_mm_embed_idx |
1 | 0 | 0 |
attr |
GPUModelRunner.mrope_positions |
1 | 0 | 0 |
attr |
GPUModelRunner.xdrope_positions |
1 | 0 | 0 |
attr |
GPUModelRunner.effective_drafter_max_model_len |
1 | 0 | 0 |
vllm.v1.worker.gpu_ubatch_wrapper (47 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UBatchWrapper.__init__ |
5 | 4 | 0 |
meth |
UBatchWrapper._create_sm_control_context |
2 | 1 | 0 |
meth |
UBatchWrapper.__getattr__ |
2 | 1 | 0 |
meth |
UBatchWrapper._capture_ubatches |
3 | 1 | 0 |
meth |
UBatchWrapper._run_ubatches |
3 | 1 | 0 |
meth |
UBatchWrapper._make_ubatch_metadata |
12 | 1 | 0 |
meth |
UBatchWrapper._slice_model_inputs |
6 | 1 | 0 |
meth |
UBatchWrapper.__call__ |
3 | 0 | 0 |
attr |
UBatchWrapper.runnable |
1 | 0 | 0 |
attr |
UBatchWrapper.vllm_config |
1 | 0 | 0 |
attr |
UBatchWrapper.compilation_config |
1 | 0 | 0 |
attr |
UBatchWrapper.comm_stream |
1 | 0 | 0 |
attr |
UBatchWrapper.ready_barrier |
1 | 0 | 0 |
attr |
UBatchWrapper.cudagraph_wrapper |
1 | 0 | 0 |
attr |
UBatchWrapper.graph_pool |
1 | 0 | 0 |
attr |
UBatchWrapper.sm_control |
1 | 0 | 0 |
attr |
UBatchWrapper.device |
1 | 0 | 0 |
meth |
SMControlContextManager.__init__ |
4 | 3 | 0 |
meth |
SMControlContextManager.__enter__ |
1 | 0 | 0 |
meth |
SMControlContextManager.__exit__ |
4 | 0 | 0 |
attr |
SMControlContextManager.total_sms |
1 | 0 | 0 |
attr |
SMControlContextManager.compute_sms |
1 | 0 | 0 |
attr |
SMControlContextManager.comm_sms |
1 | 0 | 0 |
attr |
SMControlContextManager.set_comm_sms |
1 | 0 | 0 |
attr |
SMControlContextManager.set_compute_sms |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.worker.gpu_worker (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AsyncIntermediateTensors.__getattribute__ |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
Worker.__init__ |
6 | 5 | 0 |
meth |
Worker.init_device |
1 | 0 | 0 |
meth |
Worker.reload_weights |
3 | 1 | 0 |
meth |
Worker.annotate_profile |
2 | 0 | 0 |
meth |
Worker.profile |
3 | 2 | 0 |
meth |
Worker.elastic_ep_execute |
4 | 1 | 0 |
attr |
Worker.elastic_ep_executor |
1 | 0 | 0 |
attr |
Worker.weight_transfer_engine |
1 | 0 | 0 |
attr |
Worker.profiler_config |
1 | 0 | 0 |
attr |
Worker.use_v2_model_runner |
1 | 0 | 0 |
vllm.v1.worker.kv_connector_model_runner_mixin (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
vllm.v1.worker.lora_model_runner_mixin (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
LoRAModelRunnerMixin.maybe_setup_dummy_loras |
3 | 2 | 0 |
meth |
LoRAModelRunnerMixin.maybe_select_dummy_loras |
6 | 5 | 0 |
meth |
LoRAModelRunnerMixin.maybe_dummy_run_with_lora |
7 | 6 | 0 |
meth |
LoRAModelRunnerMixin.maybe_remove_all_loras |
2 | 1 | 0 |
vllm.v1.worker.mamba_utils (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
batch_memcpy_kernel |
5 | 1 | 0 |
func |
batch_memcpy |
4 | 0 | 0 |
func |
preprocess_mamba |
10 | 9 | 0 |
func |
do_mamba_copy_block |
2 | 1 | 0 |
func |
postprocess_mamba |
9 | 8 | 0 |
vllm.v1.worker.tpu_input_batch (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InputBatch.__init__ |
9 | 8 | 0 |
attr |
InputBatch.max_num_reqs |
1 | 0 | 0 |
attr |
InputBatch.max_model_len |
1 | 0 | 0 |
attr |
InputBatch.max_num_batched_tokens |
1 | 0 | 0 |
attr |
InputBatch.device |
1 | 0 | 0 |
attr |
InputBatch.pin_memory |
1 | 0 | 0 |
attr |
InputBatch.vocab_size |
1 | 0 | 0 |
attr |
InputBatch.token_ids_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.token_ids_cpu |
1 | 0 | 0 |
attr |
InputBatch.num_tokens_no_spec |
1 | 0 | 0 |
attr |
InputBatch.num_prompt_tokens |
1 | 0 | 0 |
attr |
InputBatch.num_computed_tokens_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.num_computed_tokens_cpu |
1 | 0 | 0 |
attr |
InputBatch.block_table |
1 | 0 | 0 |
attr |
InputBatch.temperature |
1 | 0 | 0 |
attr |
InputBatch.temperature_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.temperature_cpu |
1 | 0 | 0 |
attr |
InputBatch.top_p |
1 | 0 | 0 |
attr |
InputBatch.top_p_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.top_p_cpu |
1 | 0 | 0 |
attr |
InputBatch.top_k |
1 | 0 | 0 |
attr |
InputBatch.top_k_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.top_k_cpu |
1 | 0 | 0 |
attr |
InputBatch.min_p |
1 | 0 | 0 |
attr |
InputBatch.min_p_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.min_p_cpu |
1 | 0 | 0 |
attr |
InputBatch.frequency_penalties |
1 | 0 | 0 |
attr |
InputBatch.frequency_penalties_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.frequency_penalties_cpu |
1 | 0 | 0 |
attr |
InputBatch.presence_penalties |
1 | 0 | 0 |
attr |
InputBatch.presence_penalties_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.presence_penalties_cpu |
1 | 0 | 0 |
attr |
InputBatch.repetition_penalties |
1 | 0 | 0 |
attr |
InputBatch.repetition_penalties_cpu_tensor |
1 | 0 | 0 |
attr |
InputBatch.repetition_penalties_cpu |
1 | 0 | 0 |
attr |
InputBatch.request_lora_mapping |
1 | 0 | 0 |
vllm.v1.worker.ubatching (49 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
attr |
dbo_switch_to_comm_sync |
1 | 0 | 0 |
meth |
UBatchContext.__init__ |
11 | 10 | 0 |
meth |
UBatchContext.__enter__ |
1 | 0 | 0 |
meth |
UBatchContext.__exit__ |
4 | 0 | 0 |
meth |
UBatchContext.restore_context |
1 | 0 | 0 |
meth |
UBatchContext.update_stream |
2 | 0 | 0 |
meth |
UBatchContext._signal_comm_done |
1 | 0 | 0 |
meth |
UBatchContext._signal_compute_done |
1 | 0 | 0 |
meth |
UBatchContext._wait_compute_done |
1 | 0 | 0 |
meth |
UBatchContext._wait_comm_done |
1 | 0 | 0 |
meth |
UBatchContext._cpu_yield |
1 | 0 | 0 |
meth |
UBatchContext.switch_to_comm |
1 | 0 | 0 |
meth |
UBatchContext.switch_to_compute |
1 | 0 | 0 |
meth |
UBatchContext.switch_to_comm_sync |
1 | 0 | 0 |
meth |
UBatchContext.switch_to_compute_sync |
1 | 0 | 0 |
meth |
UBatchContext.maybe_run_recv_hook |
1 | 0 | 0 |
meth |
UBatchContext.yield |
1 | 0 | 0 |
meth |
UBatchContext.yield_and_switch_from_compute_to_comm |
1 | 0 | 0 |
meth |
UBatchContext.yield_and_switch_from_comm_to_compute |
1 | 0 | 0 |
attr |
UBatchContext.id |
1 | 0 | 0 |
attr |
UBatchContext.comm_stream |
1 | 0 | 0 |
attr |
UBatchContext.compute_stream |
1 | 0 | 0 |
attr |
UBatchContext.forward_context |
1 | 0 | 0 |
attr |
UBatchContext.ready_barrier |
1 | 0 | 0 |
attr |
UBatchContext.cpu_wait_event |
1 | 0 | 0 |
attr |
UBatchContext.cpu_signal_event |
1 | 0 | 0 |
attr |
UBatchContext.current_stream |
1 | 0 | 0 |
attr |
UBatchContext.gpu_comm_done_event |
1 | 0 | 0 |
attr |
UBatchContext.gpu_compute_done_event |
1 | 0 | 0 |
attr |
UBatchContext.schedule |
1 | 0 | 0 |
attr |
UBatchContext.recv_hook |
1 | 0 | 0 |
attr |
dbo_switch_to_compute_sync |
1 | 0 | 0 |
func |
dbo_get_previous_event |
4 | 0 | 0 |
attr |
dbo_switch_to_comm |
1 | 0 | 0 |
attr |
dbo_yield_and_switch_from_compute_to_comm |
1 | 0 | 0 |
attr |
dbo_maybe_run_recv_hook |
1 | 0 | 0 |
attr |
dbo_switch_to_compute |
1 | 0 | 0 |
attr |
dbo_yield |
1 | 0 | 0 |
func |
dbo_register_recv_hook |
2 | 0 | 0 |
attr |
dbo_yield_and_switch_from_comm_to_compute |
1 | 0 | 0 |
vllm.v1.worker.utils (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
AttentionGroup.create_metadata_builders |
5 | 2 | 0 |
meth |
KVBlockZeroer.__init__ |
3 | 2 | 0 |
attr |
KVBlockZeroer.device |
1 | 0 | 0 |
attr |
KVBlockZeroer.pin_memory |
1 | 0 | 0 |
vllm.v1.worker.worker_base (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
WorkerWrapperBase.init_device |
1 | 0 | 0 |
meth |
WorkerWrapperBase.execute_method |
4 | 1 | 0 |
meth |
WorkerWrapperBase.__getattr__ |
2 | 1 | 0 |
attr |
WorkerWrapperBase.rpc_rank |
1 | 0 | 0 |
attr |
WorkerWrapperBase.global_rank |
1 | 0 | 0 |
attr |
WorkerBase.vllm_config |
1 | 0 | 0 |
attr |
WorkerBase.model_config |
1 | 0 | 0 |
attr |
WorkerBase.cache_config |
1 | 0 | 0 |
attr |
WorkerBase.lora_config |
1 | 0 | 0 |
attr |
WorkerBase.load_config |
1 | 0 | 0 |
attr |
WorkerBase.parallel_config |
1 | 0 | 0 |
attr |
WorkerBase.scheduler_config |
1 | 0 | 0 |
attr |
WorkerBase.device_config |
1 | 0 | 0 |
attr |
WorkerBase.speculative_config |
1 | 0 | 0 |
attr |
WorkerBase.observability_config |
1 | 0 | 0 |
attr |
WorkerBase.kv_transfer_config |
1 | 0 | 0 |
attr |
WorkerBase.compilation_config |
1 | 0 | 0 |
attr |
WorkerBase.current_platform |
1 | 0 | 0 |
attr |
WorkerBase.local_rank |
1 | 0 | 0 |
attr |
WorkerBase.rank |
1 | 0 | 0 |
attr |
WorkerBase.distributed_init_method |
1 | 0 | 0 |
attr |
WorkerBase.is_driver_worker |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.worker.workspace (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
WorkspaceManager.__init__ |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.worker.xpu_model_runner (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XPUModelRunner.__init__ |
3 | 2 | 0 |
attr |
XPUModelRunner.cascade_attn_enabled |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
vllm.v1.worker.xpu_worker (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
XPUWorker.__init__ |
6 | 5 | 0 |
meth |
XPUWorker.init_device |
1 | 0 | 0 |
vllm.vllm_flash_attn.cute.barrier (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
red_release |
5 | 3 | 0 |
func |
red_relaxed |
5 | 3 | 0 |
func |
ld_acquire |
4 | 2 | 0 |
vllm.vllm_flash_attn.cute.benchmark (65 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
benchmark_fwd_bwd |
10 | 0 | 0 |
func |
benchmark_backward |
10 | 0 | 0 |
func |
benchmark_memory |
6 | 0 | 0 |
func |
pytorch_profiler |
10 | 0 | 0 |
func |
benchmark_combined |
10 | 0 | 0 |
func |
benchmark_all |
10 | 0 | 0 |
func |
benchmark_forward |
9 | 0 | 0 |
vllm.vllm_flash_attn.cute.blackwell_helpers (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
gemm_ptx_w_idx |
11 | 10 | 0 |
vllm.vllm_flash_attn.cute.block_sparse_utils (140 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
handle_block_sparse_empty_tile_correction_sm100 |
34 | 28 | 0 |
func |
dQaccum_store_block_sparse_bwd_sm90 |
12 | 7 | 0 |
func |
produce_block_sparse_loads_sm100 |
14 | 5 | 0 |
func |
consume_block_sparse_loads |
21 | 6 | 0 |
func |
produce_block_sparse_q_loads_bwd_sm100 |
26 | 5 | 0 |
func |
consume_block_sparse_mma_bwd_sm90 |
19 | 5 | 0 |
func |
get_total_q_block_count_bwd |
7 | 3 | 0 |
func |
sparse_tensor_m_block |
4 | 2 | 0 |
func |
load_block_list_sm100 |
11 | 3 | 0 |
func |
produce_block_sparse_loads |
16 | 6 | 0 |
func |
load_block_list |
14 | 6 | 0 |
func |
get_total_block_count |
7 | 3 | 0 |
func |
softmax_block_sparse_sm100 |
21 | 16 | 0 |
func |
get_block_sparse_iteration_info_bwd |
7 | 3 | 0 |
func |
get_m_block_from_iter_bwd |
8 | 4 | 0 |
func |
finish_overlap_v_load |
6 | 1 | 0 |
func |
produce_block_sparse_q_loads_bwd_sm90 |
20 | 4 | 0 |
vllm.vllm_flash_attn.cute.block_sparsity (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlockSparseTensors.new_from_mlir_values |
2 | 0 | 0 |
func |
fast_sampling |
2 | 0 | 0 |
vllm.vllm_flash_attn.cute.compute_block_sparsity (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlockSparsityKernel.__init__ |
6 | 5 | 0 |
meth |
BlockSparsityKernel.__call__ |
5 | 4 | 0 |
meth |
BlockSparsityKernel.kernel |
9 | 8 | 0 |
attr |
BlockSparsityKernel.mask_mod |
1 | 0 | 0 |
attr |
BlockSparsityKernel.tile_mn |
1 | 0 | 0 |
attr |
BlockSparsityKernel.compute_full_blocks |
1 | 0 | 0 |
attr |
BlockSparsityKernel.use_aux_tensors |
1 | 0 | 0 |
attr |
BlockSparsityKernel.use_fast_sampling |
1 | 0 | 0 |
func |
compute_block_sparsity |
12 | 5 | 0 |
vllm.vllm_flash_attn.cute.copy_utils (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
cpasync_bulk_g2s |
7 | 4 | 0 |
func |
set_block_rank |
5 | 3 | 0 |
func |
cpasync_reduce_bulk_add_f32 |
6 | 3 | 0 |
func |
cpasync_bulk_get_copy_fn |
5 | 4 | 0 |
func |
atomic_add_fp32x4 |
8 | 6 | 0 |
func |
tma_get_copy_fn |
9 | 8 | 0 |
func |
tma_producer_copy_fn |
3 | 2 | 0 |
func |
cvt_copy |
8 | 5 | 0 |
func |
store_shared_remote_fp32x4 |
10 | 8 | 0 |
func |
get_copy_atom |
6 | 4 | 0 |
func |
copy |
9 | 6 | 0 |
func |
make_tmem_copy |
5 | 3 | 0 |
func |
load_s2r |
4 | 2 | 0 |
vllm.vllm_flash_attn.cute.cute_dsl_ptxas (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
patch |
1 | 0 | 0 |
attr |
CUTE_DSL_PTXAS_PATH |
1 | 0 | 0 |
attr |
VERBOSE |
1 | 0 | 0 |
vllm.vllm_flash_attn.cute.cute_dsl_utils (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
load_cubin_module_data_patched |
3 | 0 | 0 |
func |
cute_compile_patched |
3 | 0 | 0 |
func |
assume_tensor_aligned |
2 | 0 | 0 |
meth |
ParamsBase.extract_mlir_values |
1 | 0 | 0 |
meth |
ParamsBase.new_from_mlir_values |
2 | 0 | 0 |
func |
get_max_active_clusters |
2 | 0 | 0 |
meth |
ArgumentsBase.c_pointers |
1 | 0 | 0 |
meth |
ArgumentsBase.get_mlir_types |
1 | 0 | 0 |
meth |
ArgumentsBase.new_from_mlir_values |
2 | 0 | 0 |
func |
to_cute_tensor |
6 | 0 | 0 |
func |
assume_strides_aligned |
2 | 0 | 0 |
attr |
StaticTypes |
1 | 0 | 0 |
vllm.vllm_flash_attn.cute.flash_bwd (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlashAttentionBackwardSm80.init |
19 | 18 | 0 |
meth |
FlashAttentionBackwardSm80.can_implement |
11 | 1 | 0 |
meth |
FlashAttentionBackwardSm80._check_type |
14 | 13 | 0 |
meth |
FlashAttentionBackwardSm80._setup_attributes |
1 | 0 | 0 |
meth |
FlashAttentionBackwardSm80._get_tiled_mma |
1 | 0 | 0 |
meth |
FlashAttentionBackwardSm80._get_shared_storage_cls |
1 | 0 | 0 |
meth |
FlashAttentionBackwardSm80.call |
20 | 19 | 0 |
meth |
FlashAttentionBackwardSm80.kernel |
35 | 34 | 0 |
meth |
FlashAttentionBackwardSm80.compute_one_m_block |
14 | 13 | 0 |
meth |
FlashAttentionBackwardSm80.epilogue |
17 | 16 | 0 |
meth |
FlashAttentionBackwardSm80.advance_pipeline |
3 | 1 | 0 |
meth |
FlashAttentionBackwardSm80.load_K |
7 | 6 | 0 |
meth |
FlashAttentionBackwardSm80.load_V |
7 | 6 | 0 |
meth |
FlashAttentionBackwardSm80.load_Q_LSE |
14 | 13 | 0 |
meth |
FlashAttentionBackwardSm80.load_dO_dPsum |
14 | 13 | 0 |
attr |
FlashAttentionBackwardSm80.dtype |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.head_dim_padded |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.same_hdim_kv |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.head_dim_v_padded |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.check_hdim_oob |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.check_hdim_v_oob |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.qhead_per_kvhead |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.m_block_size |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.n_block_size |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.num_threads |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.pack_gqa |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.is_causal |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.num_stages_Q |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.num_stages_dO |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.SdP_swapAB |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.dKV_swapAB |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.dQ_swapAB |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.AtomLayoutMSdP |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.AtomLayoutNdKV |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.AtomLayoutMdQ |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.Mma_dKV_is_RS |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.V_in_regs |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm80.share_QV_smem |
1 | 0 | 0 |
vllm.vllm_flash_attn.cute.flash_bwd_postprocess (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlashAttentionBackwardPostprocess.init |
8 | 7 | 0 |
meth |
FlashAttentionBackwardPostprocess.can_implement |
5 | 1 | 0 |
meth |
FlashAttentionBackwardPostprocess._get_tiled_mma |
1 | 0 | 0 |
meth |
FlashAttentionBackwardPostprocess._setup_attributes |
1 | 0 | 0 |
meth |
FlashAttentionBackwardPostprocess.call |
7 | 6 | 0 |
meth |
FlashAttentionBackwardPostprocess.kernel |
15 | 14 | 0 |
attr |
FlashAttentionBackwardPostprocess.dtype |
1 | 0 | 0 |
attr |
FlashAttentionBackwardPostprocess.tile_m |
1 | 0 | 0 |
attr |
FlashAttentionBackwardPostprocess.arch |
1 | 0 | 0 |
attr |
FlashAttentionBackwardPostprocess.tile_hdim |
1 | 0 | 0 |
attr |
FlashAttentionBackwardPostprocess.check_hdim_oob |
1 | 0 | 0 |
attr |
FlashAttentionBackwardPostprocess.num_threads |
1 | 0 | 0 |
attr |
FlashAttentionBackwardPostprocess.AtomLayoutMdQ |
1 | 0 | 0 |
attr |
FlashAttentionBackwardPostprocess.dQ_swapAB |
1 | 0 | 0 |
vllm.vllm_flash_attn.cute.flash_bwd_preprocess (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlashAttentionBackwardPreprocess.init |
6 | 5 | 0 |
meth |
FlashAttentionBackwardPreprocess.can_implement |
5 | 1 | 0 |
meth |
FlashAttentionBackwardPreprocess._setup_attributes |
1 | 0 | 0 |
meth |
FlashAttentionBackwardPreprocess.call |
10 | 9 | 0 |
meth |
FlashAttentionBackwardPreprocess.kernel |
13 | 12 | 0 |
attr |
FlashAttentionBackwardPreprocess.dtype |
1 | 0 | 0 |
attr |
FlashAttentionBackwardPreprocess.m_block_size |
1 | 0 | 0 |
attr |
FlashAttentionBackwardPreprocess.arch |
1 | 0 | 0 |
attr |
FlashAttentionBackwardPreprocess.head_dim_padded |
1 | 0 | 0 |
attr |
FlashAttentionBackwardPreprocess.check_hdim_oob |
1 | 0 | 0 |
attr |
FlashAttentionBackwardPreprocess.num_threads |
1 | 0 | 0 |
vllm.vllm_flash_attn.cute.flash_bwd_sm100 (90 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlashAttentionBackwardSm100.init |
16 | 15 | 0 |
meth |
FlashAttentionBackwardSm100._setup_attributes |
1 | 0 | 0 |
meth |
FlashAttentionBackwardSm100._get_tiled_mma |
1 | 0 | 0 |
meth |
FlashAttentionBackwardSm100._setup_smem_layout |
1 | 0 | 0 |
meth |
FlashAttentionBackwardSm100.call |
24 | 23 | 0 |
meth |
FlashAttentionBackwardSm100.kernel |
54 | 52 | 0 |
meth |
FlashAttentionBackwardSm100.load |
31 | 30 | 0 |
meth |
FlashAttentionBackwardSm100.mma |
33 | 32 | 0 |
meth |
FlashAttentionBackwardSm100.split_wg |
4 | 3 | 0 |
meth |
FlashAttentionBackwardSm100.apply_score_mod |
12 | 0 | 0 |
meth |
FlashAttentionBackwardSm100.apply_score_mod_bwd |
10 | 0 | 0 |
meth |
FlashAttentionBackwardSm100.compute_loop |
38 | 36 | 0 |
meth |
FlashAttentionBackwardSm100.dQacc_reduce |
11 | 10 | 0 |
meth |
FlashAttentionBackwardSm100.epilogue_dKV |
16 | 14 | 0 |
meth |
FlashAttentionBackwardSm100.epilogue_dK_or_dV_tma |
17 | 16 | 0 |
attr |
FlashAttentionBackwardSm100.tile_hdim |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.same_hdim_kv |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.tile_hdimv |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.check_hdim_oob |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.check_hdim_v_oob |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.tile_m |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.tile_n |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.cta_tiler |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.mma_tiler_kq |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.mma_tiler_vdo |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.mma_tiler_pdo |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.mma_tiler_dsq |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.mma_tiler_dsk |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.acc_dtype |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.cluster_shape_mn |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.is_persistent |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.is_causal |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.is_local |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.qhead_per_kvhead |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.pack_gqa |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.deterministic |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.score_mod |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.score_mod_bwd |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.mask_mod |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.has_aux_tensors |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.subtile_factor |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.qk_acc_dtype |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.shuffle_LSE |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.shuffle_dPsum |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.use_smem_dS_for_mma_dK |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.reduce_warp_ids |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.compute_warp_ids |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.mma_warp_id |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.load_warp_id |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.epi_warp_id |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.empty_warp_id |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.threads_per_cta |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.compute_sync_barrier |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.reduce_sync_barrier |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.tmem_alloc_cols |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.tmem_S_offset |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.tmem_P_offset |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.tmem_dV_offset |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.tmem_dP_offset |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.tmem_dQ_offset |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.tmem_dK_offset |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.tmem_dS_offset |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.num_regs_other |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.num_regs_empty |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.buffer_align_bytes |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.num_regs_reduce |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm100.num_regs_compute |
1 | 0 | 0 |
vllm.vllm_flash_attn.cute.flash_bwd_sm90 (72 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlashAttentionBackwardSm90.init |
24 | 23 | 0 |
meth |
FlashAttentionBackwardSm90.can_implement |
9 | 1 | 0 |
meth |
FlashAttentionBackwardSm90._check_type |
10 | 9 | 0 |
meth |
FlashAttentionBackwardSm90._setup_attributes |
1 | 0 | 0 |
meth |
FlashAttentionBackwardSm90._get_tiled_mma |
1 | 0 | 0 |
meth |
FlashAttentionBackwardSm90._get_shared_storage_cls |
1 | 0 | 0 |
meth |
FlashAttentionBackwardSm90.call |
24 | 23 | 0 |
meth |
FlashAttentionBackwardSm90.kernel |
38 | 34 | 0 |
meth |
FlashAttentionBackwardSm90.load |
24 | 23 | 0 |
meth |
FlashAttentionBackwardSm90.apply_score_mod |
11 | 3 | 0 |
meth |
FlashAttentionBackwardSm90.apply_score_mod_bwd |
12 | 4 | 0 |
meth |
FlashAttentionBackwardSm90.mma |
35 | 33 | 0 |
meth |
FlashAttentionBackwardSm90.mma_one_m_block |
25 | 24 | 0 |
meth |
FlashAttentionBackwardSm90.epilogue_dKV |
19 | 18 | 0 |
meth |
FlashAttentionBackwardSm90.dQaccum_store |
7 | 6 | 0 |
attr |
FlashAttentionBackwardSm90.dtype |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.tile_hdim |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.same_hdim_kv |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.tile_hdimv |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.check_hdim_oob |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.check_hdim_v_oob |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.qhead_per_kvhead |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.is_causal |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.is_local |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.tile_m |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.tile_n |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.num_threads |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.Q_stage |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.dO_stage |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.PdS_stage |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.SdP_swapAB |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.dKV_swapAB |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.dQ_swapAB |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.AtomLayoutMSdP |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.AtomLayoutNdKV |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.AtomLayoutMdQ |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.num_mma_warp_groups |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.mma_dkv_is_rs |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.V_in_regs |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.shuffle_LSE |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.shuffle_dPsum |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.score_mod |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.score_mod_bwd |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.mask_mod |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.has_aux_tensors |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.subtile_factor |
1 | 0 | 0 |
attr |
FlashAttentionBackwardSm90.qk_acc_dtype |
1 | 0 | 0 |
vllm.vllm_flash_attn.cute.flash_fwd (89 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlashAttentionForwardSm80._get_smem_layout_atom |
1 | 0 | 0 |
meth |
FlashAttentionForwardSm80._get_tiled_mma |
1 | 0 | 0 |
meth |
FlashAttentionForwardSm80._get_shared_storage_cls |
1 | 0 | 0 |
meth |
FlashAttentionForwardSm80.call |
12 | 10 | 0 |
meth |
FlashAttentionForwardSm80.kernel |
24 | 21 | 0 |
meth |
FlashAttentionForwardSm80.compute_one_n_block |
19 | 16 | 0 |
meth |
FlashAttentionForwardBase.init |
17 | 16 | 0 |
meth |
FlashAttentionForwardBase.can_implement |
10 | 1 | 0 |
meth |
FlashAttentionForwardBase._check_type |
10 | 9 | 0 |
meth |
FlashAttentionForwardBase._setup_attributes |
1 | 0 | 0 |
meth |
FlashAttentionForwardBase._get_smem_layout_atom |
1 | 0 | 0 |
meth |
FlashAttentionForwardBase._get_tiled_mma |
1 | 0 | 0 |
meth |
FlashAttentionForwardBase._get_shared_storage_cls |
1 | 0 | 0 |
meth |
FlashAttentionForwardBase.call |
8 | 7 | 0 |
meth |
FlashAttentionForwardBase.epilogue |
14 | 13 | 0 |
meth |
FlashAttentionForwardBase.advance_pipeline |
2 | 0 | 0 |
meth |
FlashAttentionForwardBase.load_Q |
7 | 6 | 0 |
meth |
FlashAttentionForwardBase.load_K |
11 | 10 | 0 |
meth |
FlashAttentionForwardBase.load_V |
11 | 10 | 0 |
attr |
FlashAttentionForwardBase.dtype |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.tile_hdim |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.same_hdim_kv |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.tile_hdimv |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.check_hdim_oob |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.check_hdim_v_oob |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.qhead_per_kvhead |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.is_causal |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.is_local |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.pack_gqa |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.tile_m |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.tile_n |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.num_threads |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.num_stages |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.q_subtile_factor |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.Q_in_regs |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.score_mod |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.mask_mod |
1 | 0 | 0 |
attr |
FlashAttentionForwardBase.qk_acc_dtype |
1 | 0 | 0 |
meth |
FlashAttentionForwardSm90.init |
5 | 2 | 0 |
meth |
FlashAttentionForwardSm90._get_smem_layout_atom |
1 | 0 | 0 |
meth |
FlashAttentionForwardSm90._get_tiled_mma |
1 | 0 | 0 |
meth |
FlashAttentionForwardSm90._get_shared_storage_cls |
1 | 0 | 0 |
meth |
FlashAttentionForwardSm90.call |
18 | 17 | 0 |
meth |
FlashAttentionForwardSm90.kernel |
36 | 33 | 0 |
meth |
FlashAttentionForwardSm90.load |
17 | 16 | 0 |
meth |
FlashAttentionForwardSm90.mma |
28 | 26 | 0 |
meth |
FlashAttentionForwardSm90.first_half_block_overlap |
12 | 9 | 0 |
meth |
FlashAttentionForwardSm90.last_half_block_overlap |
5 | 2 | 0 |
meth |
FlashAttentionForwardSm90.mma_one_n_block |
16 | 15 | 0 |
meth |
FlashAttentionForwardSm90.mma_one_n_block_intrawg_overlap |
15 | 14 | 0 |
meth |
FlashAttentionForwardSm90.mma_init |
1 | 0 | 0 |
meth |
FlashAttentionForwardSm90.apply_score_mod |
11 | 1 | 0 |
meth |
FlashAttentionForwardSm90.warp_scheduler_barrier_sync |
1 | 0 | 0 |
meth |
FlashAttentionForwardSm90.warp_scheduler_barrier_arrive |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm90.intra_wg_overlap |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm90.mma_pv_is_rs |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm90.buffer_align_bytes |
1 | 0 | 0 |
vllm.vllm_flash_attn.cute.flash_fwd_combine (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlashAttentionForwardCombine.init |
9 | 8 | 0 |
meth |
FlashAttentionForwardCombine.can_implement |
8 | 1 | 0 |
meth |
FlashAttentionForwardCombine._setup_attributes |
1 | 0 | 0 |
meth |
FlashAttentionForwardCombine.call |
10 | 9 | 0 |
meth |
FlashAttentionForwardCombine.kernel |
19 | 18 | 0 |
attr |
FlashAttentionForwardCombine.dtype |
1 | 0 | 0 |
attr |
FlashAttentionForwardCombine.dtype_partial |
1 | 0 | 0 |
attr |
FlashAttentionForwardCombine.head_dim |
1 | 0 | 0 |
attr |
FlashAttentionForwardCombine.m_block_size |
1 | 0 | 0 |
attr |
FlashAttentionForwardCombine.k_block_size |
1 | 0 | 0 |
attr |
FlashAttentionForwardCombine.max_splits |
1 | 0 | 0 |
attr |
FlashAttentionForwardCombine.num_threads |
1 | 0 | 0 |
attr |
FlashAttentionForwardCombine.is_even_k |
1 | 0 | 0 |
attr |
FlashAttentionForwardCombine.stages |
1 | 0 | 0 |
vllm.vllm_flash_attn.cute.flash_fwd_sm100 (84 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlashAttentionForwardSm100.init |
18 | 17 | 0 |
meth |
FlashAttentionForwardSm100._setup_attributes |
1 | 0 | 0 |
meth |
FlashAttentionForwardSm100.call |
18 | 17 | 0 |
meth |
FlashAttentionForwardSm100.kernel |
34 | 31 | 0 |
meth |
FlashAttentionForwardSm100.load |
20 | 19 | 0 |
meth |
FlashAttentionForwardSm100.mma |
16 | 15 | 0 |
meth |
FlashAttentionForwardSm100.softmax_loop |
19 | 16 | 0 |
meth |
FlashAttentionForwardSm100.softmax_step |
26 | 23 | 0 |
meth |
FlashAttentionForwardSm100.correction_loop |
19 | 18 | 0 |
meth |
FlashAttentionForwardSm100.correction_rescale |
5 | 4 | 0 |
meth |
FlashAttentionForwardSm100.correction_epilogue |
12 | 11 | 0 |
meth |
FlashAttentionForwardSm100.epilogue_s2g |
10 | 9 | 0 |
meth |
FlashAttentionForwardSm100.load_Q |
7 | 6 | 0 |
meth |
FlashAttentionForwardSm100.load_KV |
12 | 11 | 0 |
meth |
FlashAttentionForwardSm100.offset_kv_smem |
4 | 3 | 0 |
meth |
FlashAttentionForwardSm100.make_and_init_load_kv_pipeline |
2 | 0 | 0 |
meth |
FlashAttentionForwardSm100.apply_score_mod |
13 | 1 | 0 |
attr |
FlashAttentionForwardSm100.use_tma_KV |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.head_dim_padded |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.same_hdim_kv |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.head_dim_v_padded |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.same_hdim_kv_padded |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.check_hdim_oob |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.check_hdim_v_oob |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.m_block_size |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.n_block_size |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.q_stage |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.cta_tiler |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.mma_tiler_qk |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.mma_tiler_pv |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.qk_acc_dtype |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.pv_acc_dtype |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.cluster_shape_mn |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.is_persistent |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.is_causal |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.is_local |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.is_varlen_q |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.use_correction_warps_for_epi |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.qhead_per_kvhead |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.is_split_kv |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.pack_gqa |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.q_subtile_factor |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.score_mod |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.mask_mod |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.s0_s1_barrier |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.overlap_sO_sQ |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.softmax0_warp_ids |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.softmax1_warp_ids |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.correction_warp_ids |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.mma_warp_id |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.epilogue_warp_ids |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.load_warp_ids |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.empty_warp_ids |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.tmem_alloc_cols |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.threads_per_cta |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.tmem_s_offset |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.tmem_o_offset |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.tmem_total |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.tmem_s_to_p_offset |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.tmem_p_offset |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.tmem_vec_offset |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.num_regs_empty |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.buffer_align_bytes |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.num_regs_softmax |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.num_regs_correction |
1 | 0 | 0 |
attr |
FlashAttentionForwardSm100.num_regs_other |
1 | 0 | 0 |
vllm.vllm_flash_attn.cute.hopper_helpers (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
make_smem_layout |
7 | 5 | 0 |
vllm.vllm_flash_attn.cute.interface (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
flash_attn_func |
18 | 17 | 0 |
func |
flash_attn_varlen_func |
21 | 20 | 0 |
meth |
FlashAttnFunc.forward |
19 | 17 | 0 |
meth |
FlashAttnFunc.backward |
4 | 0 | 0 |
func |
num_splits_heuristic |
5 | 0 | 0 |
func |
maybe_contiguous |
2 | 0 | 0 |
meth |
FlashAttnVarlenFunc.forward |
22 | 20 | 0 |
meth |
FlashAttnVarlenFunc.backward |
4 | 0 | 0 |
vllm.vllm_flash_attn.cute.mask (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AttentionMask.apply_mask |
13 | 12 | 0 |
meth |
AttentionMask.apply_mask_sm100 |
16 | 14 | 0 |
meth |
AttentionMask.apply_mask_sm100_transposed |
16 | 15 | 0 |
vllm.vllm_flash_attn.cute.mma_sm100_desc (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
to_C_format |
2 | 1 | 0 |
func |
mma_op_to_idesc |
2 | 1 | 0 |
func |
to_UMMA_format |
2 | 1 | 0 |
func |
make_instr_desc |
13 | 10 | 0 |
vllm.vllm_flash_attn.cute.pack_gqa (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PackGQA.init |
5 | 4 | 0 |
meth |
PackGQA.compute_ptr |
7 | 6 | 0 |
meth |
PackGQA.load_Q |
7 | 6 | 0 |
meth |
PackGQA.store_LSE |
7 | 6 | 0 |
meth |
PackGQA.store_O |
7 | 6 | 0 |
attr |
PackGQA.m_block_size |
1 | 0 | 0 |
attr |
PackGQA.head_dim_padded |
1 | 0 | 0 |
attr |
PackGQA.check_hdim_oob |
1 | 0 | 0 |
attr |
PackGQA.qhead_per_kvhead |
1 | 0 | 0 |
vllm.vllm_flash_attn.cute.paged_kv (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PagedKVManager.create |
15 | 14 | 0 |
meth |
PagedKVManager.load_page_table |
2 | 1 | 0 |
meth |
PagedKVManager.compute_X_ptr |
2 | 1 | 0 |
meth |
PagedKVManager.load_KV |
4 | 3 | 0 |
vllm.vllm_flash_attn.cute.pipeline (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
make_pipeline_state |
3 | 2 | 0 |
meth |
PipelineStateSimple.init |
3 | 2 | 0 |
meth |
PipelineStateSimple.advance |
1 | 0 | 0 |
meth |
PipelineStateSimple.extract_mlir_values |
1 | 0 | 0 |
meth |
PipelineStateSimple.new_from_mlir_values |
2 | 0 | 0 |
meth |
PipelineTmaUmma.create |
3 | 0 | 0 |
meth |
PipelineTmaUmma.producer_acquire |
6 | 3 | 0 |
meth |
PipelineTmaAsync.create |
3 | 0 | 0 |
meth |
PipelineTmaAsync.producer_acquire |
6 | 3 | 0 |
vllm.vllm_flash_attn.cute.seqlen_info (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SeqlenInfoQK.create |
10 | 9 | 0 |
meth |
SeqlenInfo.create |
5 | 4 | 0 |
vllm.vllm_flash_attn.cute.softmax (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Softmax.create |
5 | 4 | 0 |
func |
apply_score_mod_bwd_inner |
16 | 6 | 0 |
meth |
SoftmaxSm100.create |
4 | 3 | 0 |
meth |
SoftmaxSm100.scale_subtract_rowmax |
3 | 2 | 0 |
meth |
SoftmaxSm100.apply_exp2_convert |
7 | 6 | 0 |
meth |
SoftmaxSm100.scale_apply_exp2_convert |
4 | 3 | 0 |
func |
apply_score_mod_inner |
15 | 7 | 0 |
func |
floor_if_packed |
3 | 2 | 0 |
vllm.vllm_flash_attn.cute.testing (80 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IndexFirstAxis.forward |
4 | 0 | 0 |
meth |
IndexFirstAxis.backward |
3 | 0 | 0 |
func |
construct_local_mask |
9 | 0 | 0 |
func |
unpad_input |
4 | 0 | 0 |
func |
generate_qkv |
11 | 0 | 0 |
func |
pad_input |
5 | 0 | 0 |
func |
attention_ref |
23 | 1 | 0 |
func |
generate_random_padding_mask |
6 | 0 | 0 |
func |
construct_chunk_mask |
8 | 0 | 0 |
meth |
IndexPutFirstAxis.forward |
5 | 0 | 0 |
meth |
IndexPutFirstAxis.backward |
3 | 0 | 0 |
vllm.vllm_flash_attn.cute.tile_scheduler (125 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SingleTileLPTScheduler.init |
6 | 3 | 0 |
meth |
SingleTileLPTScheduler.to_underlying_arguments |
4 | 2 | 0 |
meth |
SingleTileLPTScheduler.create |
4 | 2 | 0 |
meth |
SingleTileLPTScheduler.get_grid_shape |
4 | 2 | 0 |
meth |
SingleTileLPTScheduler.get_current_work |
3 | 1 | 0 |
meth |
SingleTileLPTScheduler.initial_work_tile_info |
3 | 0 | 0 |
meth |
SingleTileLPTScheduler.prefetch_next_work |
3 | 0 | 0 |
meth |
SingleTileLPTScheduler.advance_to_next_work |
3 | 0 | 0 |
meth |
SingleTileLPTScheduler.extract_mlir_values |
1 | 0 | 0 |
meth |
SingleTileLPTScheduler.new_from_mlir_values |
2 | 0 | 0 |
attr |
SingleTileLPTScheduler.params |
1 | 0 | 0 |
meth |
ParamsBase.extract_mlir_values |
1 | 0 | 0 |
meth |
ParamsBase.new_from_mlir_values |
2 | 0 | 0 |
meth |
SingleTileScheduler.init |
5 | 2 | 0 |
meth |
SingleTileScheduler.to_underlying_arguments |
4 | 2 | 0 |
meth |
SingleTileScheduler.create |
4 | 2 | 0 |
meth |
SingleTileScheduler.get_grid_shape |
4 | 2 | 0 |
meth |
SingleTileScheduler.get_current_work |
3 | 1 | 0 |
meth |
SingleTileScheduler.initial_work_tile_info |
3 | 0 | 0 |
meth |
SingleTileScheduler.prefetch_next_work |
3 | 0 | 0 |
meth |
SingleTileScheduler.advance_to_next_work |
3 | 0 | 0 |
meth |
SingleTileScheduler.extract_mlir_values |
1 | 0 | 0 |
meth |
SingleTileScheduler.new_from_mlir_values |
2 | 0 | 0 |
attr |
SingleTileScheduler.params |
1 | 0 | 0 |
meth |
SingleTileVarlenScheduler.init |
6 | 3 | 0 |
meth |
SingleTileVarlenScheduler.to_underlying_arguments |
4 | 2 | 0 |
meth |
SingleTileVarlenScheduler.create |
4 | 2 | 0 |
meth |
SingleTileVarlenScheduler.get_grid_shape |
4 | 2 | 0 |
meth |
SingleTileVarlenScheduler.get_current_work |
3 | 1 | 0 |
meth |
SingleTileVarlenScheduler.initial_work_tile_info |
3 | 0 | 0 |
meth |
SingleTileVarlenScheduler.prefetch_next_work |
3 | 0 | 0 |
meth |
SingleTileVarlenScheduler.advance_to_next_work |
3 | 0 | 0 |
meth |
SingleTileVarlenScheduler.extract_mlir_values |
1 | 0 | 0 |
meth |
SingleTileVarlenScheduler.new_from_mlir_values |
2 | 0 | 0 |
attr |
SingleTileVarlenScheduler.params |
1 | 0 | 0 |
meth |
Params.create |
4 | 2 | 0 |
meth |
SingleTileLPTBwdScheduler.init |
5 | 2 | 0 |
meth |
SingleTileLPTBwdScheduler.to_underlying_arguments |
4 | 2 | 0 |
meth |
SingleTileLPTBwdScheduler.create |
4 | 2 | 0 |
meth |
SingleTileLPTBwdScheduler.get_grid_shape |
4 | 2 | 0 |
meth |
SingleTileLPTBwdScheduler.get_current_work |
3 | 1 | 0 |
meth |
SingleTileLPTBwdScheduler.initial_work_tile_info |
3 | 0 | 0 |
meth |
SingleTileLPTBwdScheduler.prefetch_next_work |
3 | 0 | 0 |
meth |
SingleTileLPTBwdScheduler.advance_to_next_work |
3 | 0 | 0 |
meth |
SingleTileLPTBwdScheduler.extract_mlir_values |
1 | 0 | 0 |
meth |
SingleTileLPTBwdScheduler.new_from_mlir_values |
2 | 0 | 0 |
attr |
SingleTileLPTBwdScheduler.params |
1 | 0 | 0 |
meth |
StaticPersistentTileScheduler.init |
5 | 2 | 0 |
meth |
StaticPersistentTileScheduler.to_underlying_arguments |
4 | 2 | 0 |
meth |
StaticPersistentTileScheduler.create |
4 | 2 | 0 |
meth |
StaticPersistentTileScheduler.get_grid_shape |
4 | 2 | 0 |
meth |
StaticPersistentTileScheduler.get_current_work |
3 | 1 | 0 |
meth |
StaticPersistentTileScheduler.initial_work_tile_info |
3 | 0 | 0 |
meth |
StaticPersistentTileScheduler.prefetch_next_work |
3 | 0 | 0 |
meth |
StaticPersistentTileScheduler.advance_to_next_work |
3 | 0 | 0 |
meth |
StaticPersistentTileScheduler.extract_mlir_values |
1 | 0 | 0 |
meth |
StaticPersistentTileScheduler.new_from_mlir_values |
2 | 0 | 0 |
attr |
StaticPersistentTileScheduler.params |
1 | 0 | 0 |
vllm.vllm_flash_attn.cute.utils (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
add_round_down |
5 | 3 | 0 |
func |
fmax |
6 | 4 | 0 |
func |
ex2_emulation |
4 | 2 | 0 |
func |
ssa_to_scalar |
2 | 0 | 0 |
func |
convert_from_dlpack_leading_static |
6 | 1 | 0 |
func |
atomic_add_fp32 |
5 | 3 | 0 |
func |
evaluate_polynomial_2 |
6 | 4 | 0 |
func |
scalar_to_ssa |
3 | 2 | 0 |
func |
cvt_f16x2_f32 |
6 | 4 | 0 |
func |
domain_offset_aligned |
5 | 3 | 0 |
func |
create_softcap_scoremod |
2 | 0 | 0 |
func |
e2e_asm2 |
5 | 3 | 0 |
func |
hash_callable |
3 | 2 | 0 |
func |
shr_u32 |
5 | 3 | 0 |
func |
combine_int_frac_ex2 |
5 | 3 | 0 |
func |
ex2_emulation_2 |
5 | 3 | 0 |
func |
evaluate_polynomial |
5 | 3 | 0 |
func |
convert_from_dlpack |
5 | 1 | 0 |
func |
elem_pointer |
5 | 3 | 0 |
vllm.vllm_flash_attn.flash_attn_interface (84 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
flash_attn_varlen_func |
31 | 3 | 0 |
func |
get_scheduler_metadata |
21 | 5 | 0 |
func |
sparse_attn_varlen_func |
21 | 0 | 0 |
func |
sparse_attn_func |
17 | 0 | 0 |
func |
maybe_contiguous |
2 | 0 | 0 |
Type-Ignore Comments
| Flavor | Count |
|---|---|
type: ignore[call-arg] |
31 |
type: ignore[assignment] |
23 |
type: ignore |
21 |
type: ignore[misc] |
12 |
type: ignore[no-redef] |
5 |
type: ignore[override] |
5 |
type: ignore[arg-type] |
3 |
type: ignore[attr-defined] |
3 |
type: ignore[type-var] |
3 |
type: ignore[import-not-found] |
2 |
type: ignore[assignment, misc] |
1 |
type: ignore[misc, no-redef] |
1 |
type: ignore[name-defined] |
1 |